# transformers: DPT model configuration
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
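
# Usage sketch (illustrative, not part of the original file): the hybrid
# variant builds a default BiT backbone when `backbone_config` is left unset.
if __name__ == "__main__":
    config = DPTConfig(is_hybrid=True)
    assert config.backbone_config is not None  # auto-created BitConfig
    assert config.to_dict()["model_type"] == "dpt"
    assert DPTConfig().backbone_config is None  # pure-ViT variant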
# transformers: TimmBackbone configuration
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
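
# Usage sketch (illustrative, not part of the original file): by default only
# the last feature map is returned; `out_indices` selects backbone stages.
if __name__ == "__main__":
    config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
    assert config.out_indices == (1, 2, 3, 4)
    assert TimmBackboneConfig().out_indices == (-1,)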
# Doubly linked list
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
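
# Usage sketch (illustrative) for the list above: positions are 1-based,
# so inserting at position 2 places the new node before the second element.
if __name__ == "__main__":
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)  # 1 2 3
    linked_list.insert_at_position(2, 9)  # 1 9 2 3
    assert str(linked_list) == "1 9 2 3"
    assert 9 in linked_list
    linked_list.delete_value(9)
    assert list(linked_list) == [1, 2, 3]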
# transformers: convert a PyTorch BERT checkpoint to the original TensorFlow format
# (uses the TF1 graph/session API)
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    # These weights are stored transposed relative to the TF checkpoint layout
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
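
# Invocation sketch (illustrative; the paths are placeholders): `main` accepts
# an argv list, so the conversion can be driven from Python as well as the CLI.
def _example_convert_bert():
    main(
        [
            "--model_name", "bert-base-uncased",
            "--pytorch_model_path", "./pytorch_model.bin",
            "--tf_cache_dir", "./tf_checkpoints",
        ]
    )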
# transformers: convert an original XLM checkpoint to PyTorch
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
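
# Invocation sketch (illustrative; paths are placeholders). The destination
# folder must already exist, since the function writes three files into it.
def _example_convert_xlm():
    convert_xlm_checkpoint_to_pytorch(
        "./xlm_checkpoint.pth",  # official XLM dump
        "./converted_xlm",  # destination folder
    )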
# accelerate: bitsandbytes quantization utilities
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    param_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, param_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
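
# Usage sketch (illustrative, mirroring the accelerate quantization docs):
# `load_and_quantize_model` expects a model instantiated on the meta device
# plus a folder containing the real weights. "gpt2" is just an example model.
def _example_quantize(weights_location: str):
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    return load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location=weights_location,
        device_map="auto",
    )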
# Catalan numbers via dynamic programming
def catalan_numbers(upper_limit: int) -> list:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
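
# Quick sanity check (illustrative): the DP values match the closed form
# C(n) = binom(2n, n) / (n + 1).
if __name__ == "__main__":
    from math import comb

    assert catalan_numbers(10) == [comb(2 * n, n) // (n + 1) for n in range(11)]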
# datasets: SuperGLUE metric
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    # preds and labels arrive as numpy arrays (format="numpy" below)
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    # Group the predictions by question, then score each question separately
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
# accelerate: training example that tracks peak GPU memory usage (DeepSpeed-aware)
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
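
# Launch sketch (illustrative): the script is meant to be run through the
# `accelerate` launcher so the DeepSpeed plugin is configured; the filename
# below is a placeholder for wherever this file is saved.
def _example_launch():
    import subprocess

    subprocess.run(
        ["accelerate", "launch", "peak_memory_usage.py", "--num_epochs", "1"],
        check=True,
    )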
# AND gate
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
# Ford-Fulkerson maximum flow (Edmonds-Karp: augmenting paths found with BFS)
def bfs(graph, source, t, parent):
    # Return True if there is a path from source to sink (t) in the residual graph
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by bfs and stores the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
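
# A second, hand-checkable example (illustrative): two augmenting paths,
# 0 -> 1 -> 3 (capacity 2) and 0 -> 2 -> 3 (capacity 2), give a max flow of 4.
if __name__ == "__main__":
    small_graph = [
        [0, 3, 2, 0],
        [0, 0, 0, 2],
        [0, 1, 0, 3],
        [0, 0, 0, 0],
    ]
    assert ford_fulkerson(small_graph, 0, 3) == 4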
# diffusers: convert a Stable Diffusion checkpoint to ONNX
import argparse
import os
import shutil
from pathlib import Path

import onnx
import torch
from packaging import version
from torch.onnx import export

from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
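
# Invocation sketch (illustrative; the model id and output path are
# placeholders): the same conversion can be driven directly from Python.
def _example_export():
    convert_models(
        model_path="runwayml/stable-diffusion-v1-5",
        output_path="./sd_onnx",
        opset=14,
        fp16=False,
    )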
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
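# --- Illustrative sketch (not part of the upstream test suite) ---------------
# The two sampling modes exercised above, side by side. The checkpoint ids
# match the slow tests; treat this as a hedged example, not canonical usage.
def _example_onestep_vs_multistep():
    unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to(torch_device)
    # Onestep: a single network evaluation starting from maximal noise.
    onestep = pipe(num_inference_steps=1, class_labels=0, output_type="np").images
    # Multistep: an explicit timestep schedule, here two network evaluations.
    multistep = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0, output_type="np").images
    return onestep, multistep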
| 33
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
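# --- Illustrative sketch (assumed values) -------------------------------------
# How the cached key/value shapes built above are derived: each past tensor is
# (batch, num_heads, past_sequence_length, d_model // num_heads).
def _example_past_key_value_shape(batch=2, num_heads=16, past_len=7, d_model=512):
    head_dim = d_model // num_heads
    return (batch, num_heads, past_len, head_dim)  # e.g. (2, 16, 7, 32)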
| 261
| 0
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --seed=42\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --block_size 128\n            --per_device_train_batch_size 5\n            --per_device_eval_batch_size 5\n            --num_train_epochs 2\n            --output_dir {tmp_dir}\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --num_train_epochs=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --seed=42\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/swag/sample.json\n            --validation_file tests/fixtures/tests_samples/swag/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=20\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --with_tracking\n        ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n            --model_name_or_path sshleifer/student_marian_en_ro_6_1\n            --source_lang en\n            --target_lang ro\n            --train_file tests/fixtures/tests_samples/wmt16/sample.json\n            --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --num_beams=6\n            --learning_rate=3e-3\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --source_lang en_XX\n            --target_lang ro_RO\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n            --dataset_name huggingface/semantic-segmentation-test-sample\n            --output_dir {tmp_dir}\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n        ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n            --model_name_or_path google/vit-base-patch16-224-in21k\n            --dataset_name hf-internal-testing/cats_vs_dogs_sample\n            --learning_rate 1e-4\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 1\n            --max_train_steps 2\n            --train_val_split 0.1\n            --seed 42\n            --output_dir {tmp_dir}\n            --with_tracking\n            --checkpointing_steps 1\n        ".split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
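# --- Illustrative sketch (hypothetical paths) ---------------------------------
# Every test above follows the same recipe: prepend the `accelerate launch`
# prefix built in setUpClass to the example script's CLI flags, run it as a
# subprocess via run_command, then read back `all_results.json` with
# get_results(). The paths below are placeholders, not real fixtures.
def _example_command(examples_dir="examples", tmp_dir="/tmp/demo"):
    launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
    testargs = f"\n        {examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n        --model_name_or_path distilbert-base-uncased\n        --output_dir {tmp_dir}\n    ".split()
    return launch_args + testargs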
| 89
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
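# --- Hedged usage sketch -------------------------------------------------------
# Typical wiring for this trainer. The objects referenced here (`model`,
# `training_args`, the datasets and `post_processing_function`) are assumed to
# come from a surrounding example script such as run_seq2seq_qa.py:
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")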
| 89
| 1
|
def check_cycle(graph: dict) -> bool:
    # keep track of all the visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
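# Illustrative example (assumed adjacency-list input): the back edge 2 -> 0
# makes the first graph cyclic, while the second is a DAG.
def _example_graphs():
    cyclic = {0: [1], 1: [2], 2: [0]}
    acyclic = {0: [1], 1: [2], 2: []}
    return check_cycle(cyclic), check_cycle(acyclic)  # (True, False)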
if __name__ == "__main__":
from doctest import testmod
testmod()
| 611
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
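# Quick illustrative check (values are assumptions matching the distilled
# DeiT-base checkpoint above): the sequence length seen by the encoder is the
# number of patches plus the [CLS] and distillation tokens.
def _example_seq_length(image_size=224, patch_size=16):
    return (image_size // patch_size) ** 2 + 2  # 196 patches + 2 tokens = 198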
| 611
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : str = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
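# Hedged usage sketch: instantiating the config with a couple of non-default
# values (argument names follow the __init__ signature above; the values are
# illustrative, not tied to a released checkpoint).
def _example_config():
    return FNetConfig(vocab_size=32000, use_tpu_fourier_optimizations=True, tpu_short_seq_length=256)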
| 712
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
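# --- Hedged usage sketch --------------------------------------------------------
# Inside a notebook you would define a training function and hand it to the
# launcher above; the body and argument values here are placeholders.
def _example_training_function(batch_size=16):
    pass  # build the Accelerator, model and dataloaders, then train


# notebook_launcher(_example_training_function, args=(16,), num_processes=2, mixed_precision="fp16")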
| 191
| 0
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
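    # Illustrative example: each recursive pass bubbles the largest remaining
    # element to the end, so this call sorts in a few passes.
    print(bubble_sort([3, 1, 4, 2]))  # -> [1, 2, 3, 4]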
| 60
|
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden/private directories in place so os.walk skips them
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
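# Example of the generated output (illustrative): for a tree containing
# "sorts/bubble_sort.py", the script prints a "## Sorts" heading followed by
# "  * [Bubble Sort](sorts/bubble_sort.py)".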
| 230
| 0
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
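    # Illustrative examples: a 3-4-5 triangle satisfies the polygon inequality,
    # while (1, 1, 3) does not (the longest side is not shorter than the sum of
    # the remaining sides).
    print(check_polygon([3, 4, 5]))  # True
    print(check_polygon([1, 1, 3]))  # False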
| 638
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
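# Example invocation (the script file name is a placeholder for wherever this lives on disk):
#
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16
#
# With --base_model left at its default (True), only the backbone is converted and checked
# against the torch.hub DINO model's CLS-token hidden state.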
from __future__ import annotations
from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each keyword to the list of its start indices in `string`."""
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
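    # Usage sketch: build the automaton once, then scan any text in a single pass.
    auto = Automaton(["what", "hat", "ver", "er"])
    print(auto.search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}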
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class __lowercase ( __lowerCamelCase ):
snake_case_ = """open-llama"""
def __init__( self : Dict ,A : str=100_000 ,A : str=4_096 ,A : Optional[Any]=11_008 ,A : Tuple=32 ,A : str=32 ,A : Optional[int]="silu" ,A : List[Any]=2_048 ,A : str=0.0_2 ,A : Optional[int]=1e-6 ,A : int=True ,A : Tuple=0 ,A : str=1 ,A : Any=2 ,A : Optional[Any]=False ,A : int=True ,A : Any=0.1 ,A : Optional[Any]=0.1 ,A : Optional[Any]=True ,A : Union[str, Any]=True ,A : Tuple=None ,**A : Optional[int] ,):
'''simple docstring'''
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : Optional[int] = rms_norm_eps
UpperCAmelCase__ : Any = use_cache
UpperCAmelCase__ : Optional[Any] = kwargs.pop(
"""use_memorry_efficient_attention""" ,A )
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : str = attention_dropout_prob
UpperCAmelCase__ : Optional[int] = use_stable_embedding
UpperCAmelCase__ : Tuple = shared_input_output_embedding
UpperCAmelCase__ : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,tie_word_embeddings=A ,**A ,)
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,A ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"got {self.rope_scaling}" )
UpperCAmelCase__ : List[Any] = self.rope_scaling.get("""type""" ,A )
UpperCAmelCase__ : int = self.rope_scaling.get("""factor""" ,A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(A ,A ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
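# A minimal sketch of how the validation above behaves (values are illustrative):
#
#   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
#   OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})   # ValueError: bad type
#   OpenLlamaConfig(rope_scaling={"factor": 2.0})                    # ValueError: two fields required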
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row to cancel the leading term of every other row
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If the first term is 0, the row is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set
    if len(final_set[0]) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, saved_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
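    # A 2x2 sanity check: x + y = 3 and x - y = 1 give x = 2.0, y = 1.0.
    print(solve_simultaneous([[1, 1, 3], [1, -1, 1]]))  # -> [2.0, 1.0]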
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
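# Example invocation (script path is a placeholder):
#
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform
#
# Six of the twelve teacher layers (0, 2, 4, 7, 9, 11) are copied into student positions
# 0-5, which is how a 6-layer DistilRoBERTa-style student is seeded before distillation.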
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format).

    Note: relies on the module-level `from_gh` flag set under `__main__` below.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
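    # The resulting `selected_warnings.json` is a sorted list of unique warning bodies,
    # e.g. entries of the form "src/foo.py:12: FutureWarning: ..." (illustrative).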
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment


def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
@slow
@require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    """Utility class holding a conversation and its history."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
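# Usage sketch (checkpoint name is illustrative; any conversational model works):
#
#   from transformers import pipeline
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("What's the best way to learn Python?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])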
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
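# The lazy-module pattern above defers heavy imports: `from transformers.models.autoformer
# import AutoformerModel` only triggers the real import (and the torch availability check)
# on first attribute access, because sys.modules[__name__] is swapped for a _LazyModule.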
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
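# A minimal sketch of how these two classes fit together (task name is illustrative):
#
#   config = ConvBertConfig()                      # defaults mirror YituTech/conv-bert-base
#   onnx_config = ConvBertOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)                      # OrderedDict with dynamic batch/sequence axes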
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California\'s largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
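

# Minimal usage sketch (illustrative, not part of the original module):
# instantiating the deprecated class still works, but it emits a FutureWarning
# and otherwise behaves exactly like CLIPImageProcessor.
#
#   extractor = CLIPFeatureExtractor()  # -> FutureWarning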
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
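    # Additional illustrative checks (the expected values in the comments are
    # my annotations, not from the original file):
    print(indian_phone_validator("9876543210"))  # True: a plain 10-digit mobile number
    print(indian_phone_validator("123456789"))  # False: wrong leading digit and too short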
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Constants per WGS84 (https://en.wikipedia.org/wiki/World_Geodetic_System);
    # the result is a distance in metres.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
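
    # Minimal usage sketch; the coordinates below are illustrative assumptions,
    # not values from the original file.
    SAN_FRANCISCO = (37.774856, -122.424227)
    NEW_YORK = (40.713019, -74.012647)
    print(f"SF -> NYC: {haversine_distance(*SAN_FRANCISCO, *NEW_YORK):0.0f} meters")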
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot element from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest number in lst.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
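
    # Minimal usage sketch (illustrative): the 3rd smallest element of an
    # unsorted list of distinct integers.
    print(kth_number([2, 1, 3, 4, 5], 3))  # expected output: 3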
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
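

# Minimal usage sketch (hedged: the checkpoint name and the `waveform` tensor
# below are illustrative assumptions, not part of this module):
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="a transcript", return_tensors="pt").input_ids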
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
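

# Minimal usage sketch (illustrative): a default VanConfig reproduces the
# four-stage layout encoded in the argument defaults above.
#
#   config = VanConfig()
#   print(config.hidden_sizes)  # [64, 128, 320, 512]
#   print(config.depths)        # [3, 3, 12, 3]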
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
UpperCamelCase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
UpperCamelCase = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'''https://google.com{link.get('href')}''')
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
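

# Minimal usage sketch (illustrative, mirroring test_basic above):
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux"], look_for_args_file=False
#   )
#   assert example.foo == 1 and not example.flag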
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
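

# Note on the offset asserted above: PegasusTokenizer shifts raw SentencePiece
# piece ids upward by tokenizer.offset (103) to leave room for <mask_1>,
# <mask_2> and the additional special tokens at the bottom of the vocabulary.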
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
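

# Minimal usage sketch (hedged: the checkpoint name below is an illustrative
# assumption, not part of this module):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)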
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
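

# Minimal usage sketch (illustrative, mirroring the integration test above;
# `image` is assumed to be a PIL image):
#
#   processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
#   model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits  # shape (1, 1001)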
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: Any = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        # We go deep on the right branch
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        # We go deep on the left branch
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        # Perform an inorder traversal and append node values to arr
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        # Return the kth smallest element in a binary search tree
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
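
    # Minimal usage sketch (illustrative): kth-smallest lookup on a fresh tree.
    t = BinarySearchTree()
    t.insert(8, 3, 6, 1, 10)
    print(t.find_kth_smallest(2, t.root))  # expected output: 3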
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
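

# Worked example for the linear schedule above (numbers assumed for
# illustration): with num_warmup_steps=10 and num_training_steps=100 the
# multiplier ramps from 0 to 1 during warmup, then decays linearly to 0:
#     step 5   -> 5 / 10          = 0.5
#     step 10  -> (100 - 10) / 90 = 1.0
#     step 55  -> (100 - 55) / 90 = 0.5
#     step 100 -> 0.0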
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
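

# Minimal usage sketch (hypothetical model, lr and step counts): the object
# returned by get_scheduler is a regular torch LambdaLR, stepped once per
# optimizer update.
def _scheduler_usage_example() -> None:
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
    for _ in range(1000):
        optimizer.step()
        scheduler.step()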
| 63
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
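

# Hedged aside (not in the original file): squareplus(x) = (x + sqrt(x^2 + 4)) / 2
# is a smooth map from the reals onto (0, inf) with squareplus(0) == 1.0, which
# is why the concrete heads below use it to keep scale, df and total_count
# strictly positive, e.g.:
#
#     x = torch.tensor([-3.0, 0.0, 3.0])
#     assert (DistributionOutput.squareplus(x) > 0).all()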
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 63
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__( self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
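

# Hedged usage sketch for the ONNX config above (model id and sizes are
# assumed; kept as comments since running it downloads weights):
#
#     from transformers import AutoConfig, AutoTokenizer
#     config = AutoConfig.from_pretrained("EleutherAI/gpt-j-6B")
#     onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#     dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)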
| 31
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
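

# How the child indexing works (worked example, not in the original file): the
# node at (depth d, index i) has children 2 * i and 2 * i + 1 at depth d + 1,
# so the leaves at depth == height line up with the flat `scores` list. With
# scores = [3, 5, 2, 9] and height = 2:
#     minimax(0, 0, True, [3, 5, 2, 9], 2) == max(min(3, 5), min(2, 9)) == 3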
| 214
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])

        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode('你好', add_special_tokens=False)
        text_2 = tokenizer.encode('你是谁', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                text = '你好,你是谁'
                tokens = tokenizer.tokenize(text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                pronunciation_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)

                prepared_input_dict = tokenizer.prepare_for_model(
                    ids, shape_ids, pronunciation_ids, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(text, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 700
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(filepath) as f:
        data = f.read().strip().split('\n')
        adjaceny_matrix = [line.split(',') for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
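

# Toy check for the Prim implementation above (hypothetical 3-vertex graph):
def _prims_demo() -> None:
    g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    mst = g.prims_algorithm()
    # the MST keeps edges (0, 1) and (1, 2), for a total weight of 3
    assert sum(mst.edges.values()) == 3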
| 273
| 0
|
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False, ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
                f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward( self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
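

# Hedged aside on the chunked feed-forward in the forward pass above: because
# the feed-forward acts independently on every position, splitting
# norm_hidden_states into chunks along self._chunk_dim and concatenating the
# per-chunk ff() outputs is numerically identical to a single ff() call; it
# trades peak activation memory for extra kernel launches.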
class FeedForward(nn.Module):
    def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 633
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : int = logging.get_logger(__name__)
class __UpperCamelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample=PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor=1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean=None, image_std=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale( self, image: np.ndarray, scale, data_format=None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize( self, image: np.ndarray, mean, std, data_format=None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean=None, image_std=None, return_tensors=None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
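

# Hedged usage sketch for the processor above (its public class name is
# obscured in this dump; sizes follow the defaults): preprocess resizes to
# 256x256, center-crops to 224x224, rescales to [0, 1] and normalizes:
#
#     processor = __UpperCamelCase()
#     image = PIL.Image.new("RGB", (300, 300))
#     batch = processor.preprocess(image, return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)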
| 633
| 1
|
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
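

# Quick usage sketch (made-up weights and values): with capacity 5, taking the
# items of weight 1 and 4 (values 10 and 50) is optimal.
def _knapsack_demo() -> None:
    weights = [1, 3, 4]
    values = [10, 40, 50]
    assert knapsack(weights, values, number_of_items=3, max_weight=5, index=0) == 60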
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)

        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy' )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = 'A red cartoon frog, 4k'

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 66
| 0
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class a__ ( unittest.TestCase ):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        """simple docstring"""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
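# The tester above builds small random configs and inputs; the test classes
# below run the shared ModelTesterMixin checks plus ConvNext-specific shape
# assertions on top of them.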
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
    def test_model_common_attributes(self):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
    def test_feed_forward_chunking(self):
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self)
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "<unk>")
        self.assertEqual(vocab_keys[1] , "<s>")
        self.assertEqual(vocab_keys[-1] , "<pad>")
        self.assertEqual(len(vocab_keys) , 1_002)
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [18_536, 2_260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        """simple docstring"""
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False)
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
lowerCAmelCase : Any = {"input_ids": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    '''simple docstring'''
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
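# Example invocation (script name and paths are illustrative):
#   python convert_ldm_original_checkpoint.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline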
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
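# Minimal usage sketch (not part of the original file): on a 3x3 grid whose
# centre cell is blocked, there are exactly two simple corner-to-corner paths:
#   depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set())  # -> 2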
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
"""simple docstring"""
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
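# The dict above mirrors the keyword arguments PoolFormerImageProcessor
# accepts, so each test can rebuild an identical processor from a plain dict.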
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
UpperCamelCase_ : List[Any] = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing , "size"))
        self.assertTrue(hasattr(image_processing , "crop_pct"))
        self.assertTrue(hasattr(image_processing , "do_normalize"))
        self.assertTrue(hasattr(image_processing , "image_mean"))
        self.assertTrue(hasattr(image_processing , "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
    def test_batch_feature(self):
"""simple docstring"""
pass
    def test_call_pil(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
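# timm stores query/key/value as one fused `qkv` projection per block; the HF
# ViT implementation keeps three separate linear layers, so the fused weight
# and bias are sliced into thirds below.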
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny''' ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small''' ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small''' ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('''base''' ):
            pass
        elif vit_name[4:].startswith('''large''' ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge''' ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
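# Example invocation (script name and output path are illustrative):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224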
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_A = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        """simple docstring"""
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3, 1E-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT(self, **inputs):
        """simple docstring"""
        return self.bert(**inputs ).last_hidden_state
    def VectorSum(self, token_embeddings):
        """simple docstring"""
        return token_embeddings.sum(2, keepdim=True )
    def Atten(self, sub, sup, T=1):
        """simple docstring"""
        return self.softmax(T * self.cos(sub, sup ) )
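    # forward() below scores every support-token position as a candidate entity
    # start/end for each query via dot-product attention followed by a softmax.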
    def forward(self, W_query, W_supports):
        """simple docstring"""
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i], s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
def __init__( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , """vision""")
self.check_model_type(SCREAMING_SNAKE_CASE_)
    def __call__(self, image, candidate_labels=None, **kwargs):
        '''simple docstring'''
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("""text_queries""")
        if isinstance(image , (str, Image.Image)):
            inputs = {"""image""": image, """candidate_labels""": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["""threshold"""] = kwargs["""threshold"""]
        if "top_k" in kwargs:
            postprocess_params["""top_k"""] = kwargs["""top_k"""]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        '''simple docstring'''
        image = load_image(inputs["""image"""])
        candidate_labels = inputs["""candidate_labels"""]
        if isinstance(candidate_labels , str):
            candidate_labels = candidate_labels.split(""",""")
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework)
            image_features = self.image_processor(image , return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        '''simple docstring'''
        target_size = model_inputs.pop("""target_size""")
        candidate_label = model_inputs.pop("""candidate_label""")
        is_last = model_inputs.pop("""is_last""")
        outputs = self.model(**model_inputs)
        model_outputs = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        '''simple docstring'''
        results = []
        for model_output in model_outputs:
            label = model_output["""candidate_label"""]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["""target_size"""])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["""scores"""][index].item()
                box = self._get_bounding_box(outputs["""boxes"""][index][0])
                result = {"""score""": score, """label""": label, """box""": box}
                results.append(result)
        results = sorted(results , key=lambda x: x["score"] , reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box):
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
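# Minimal usage sketch (model name is illustrative):
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cat.png", candidate_labels=["cat", "remote control"])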
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
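    # Each create_and_check_* helper above instantiates one XLM head on the
    # shared random config/inputs and asserts the output shapes for that task.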
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        """simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        """simple docstring"""
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_xlm_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_simple_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
    def test_xlm_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1) -> int:
        """simple docstring"""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1) -> List[Any]:
        """simple docstring"""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), )
    @slow
    def test_model_from_pretrained(self) -> int:
        """simple docstring"""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self) -> Union[str, Any]:
        """simple docstring"""
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 4_47]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
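# To exercise the slow integration test above (sketch; the test-file path assumes the
# usual transformers repository layout and is not confirmed by this file):
#
#     RUN_SLOW=1 python -m pytest tests/models/xlm/test_modeling_xlm.py -k lm_generate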
| 89
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
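# Minimal usage sketch for the processor above (illustrative; the class name is
# reconstructed from its attribute combination, and the concrete tokenizer/image
# processor instances below are assumptions, not part of this file):
#
#     from transformers import CLIPImageProcessor, XLMRobertaTokenizerFast
#     processor = AltCLIPProcessor(
#         image_processor=CLIPImageProcessor(),
#         tokenizer=XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base"),
#     )
#     inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
#     # -> tokenizer outputs (input_ids, attention_mask) plus pixel_values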
| 670
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ) -> Optional[Any]:
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> Optional[int]:
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
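# Example invocation (sketch; the script name and file paths are placeholders derived
# from the argparse definitions above, not verified names):
#
#     python convert_hifigan_checkpoint.py \
#         --checkpoint_path hifigan_generator.ckpt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan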
| 670
| 1
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class Swin2SRImageProcessor ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_rescale = True , rescale_factor = 1 / 2_55 , do_pad = True , pad_size = 8 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image , scale , data_format = None , **kwargs):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def pad( self , image , size , data_format = None):
        '''simple docstring'''
        old_height , old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format)
    def preprocess( self , images , do_rescale = None , rescale_factor = None , do_pad = None , pad_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
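# Minimal usage sketch (illustrative; the zero array stands in for a real image):
#
#     import numpy as np
#     image_processor = Swin2SRImageProcessor()
#     batch = image_processor.preprocess(np.zeros((20, 20, 3), dtype=np.uint8), return_tensors="np")
#     # each side is padded up to the next multiple of pad_size=8:
#     # (20 // 8 + 1) * 8 = 24, so pixel_values has shape (1, 3, 24, 24)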
| 12
|
from math import factorial
def binomial_distribution ( successes , trials , prob ) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials" )
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers" )
    if not isinstance(successes ,int ) or not isinstance(trials ,int ):
        raise ValueError("the function is defined for non-negative integers" )
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
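# Worked check: binomial_distribution(2, 4, 0.75)
#   = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375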
| 397
| 0
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = "hidden_states"
    @property
    def dummy_input( self ):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input( self , seed=0 ):
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape( self ):
        return (4, 8)
    @property
    def output_shape( self ):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 4,
            '''num_layers''': 2,
            '''embedding_dim''': 8,
            '''num_embeddings''': 7,
            '''additional_embeddings''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub( self ):
        model , loading_info = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature( self ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained( self ):
        model = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        model = model.to(torch_device )
        if hasattr(model , '''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        inputs = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**inputs )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def get_dummy_seed_input( self , batch_size=1 , embedding_dim=768 , num_embeddings=77 , seed=0 ):
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
    def test_kandinsky_prior( self , seed , expected_slice ):
        model = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
        model.to(torch_device )
        inputs = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**inputs )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
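# Smoke-test sketch for the dummy config used in the tester above (CPU-only, illustrative):
#
#     model = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2,
#                              embedding_dim=8, num_embeddings=7, additional_embeddings=4)
#     out = model(hidden_states=torch.randn(4, 8), timestep=2,
#                 proj_embedding=torch.randn(4, 8),
#                 encoder_hidden_states=torch.randn(4, 7, 8))[0]
#     # out.shape is expected to be (4, 8), matching the tester's output_shape property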
| 599
|
'''simple docstring'''
def manhattan_distance( point_a : list , point_b : list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('''Both points must be in the same n-dimensional space''' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point : list[float] ) -> None:
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        '''Expected a list of numbers as input, found '''
                        f"""{type(item ).__name__}"""
                    )
                    raise TypeError(msg )
        else:
            msg = f"""Expected a list of numbers as input, found {type(point ).__name__}"""
            raise TypeError(msg )
    else:
        raise ValueError('''Missing an input''' )
def manhattan_distance_one_liner( point_a : list , point_b : list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('''Both points must be in the same n-dimensional space''' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
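# Quick check: manhattan_distance([1, 1], [2, 2]) == 2.0 (|1 - 2| + |1 - 2|),
# and manhattan_distance_one_liner returns the same value.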
| 599
| 1
|
from collections import deque
class Process:
    def __init__( self , process_name : str , arrival_time : int , burst_time : int ):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__( self , number_of_queues : int , time_slices : list[int] , queue : deque[Process] , current_time : int , ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue( self ):
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue : list[Process] ):
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue : list[Process] ):
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue : list[Process] ):
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue : deque[Process] ):
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process : Process ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue : deque[Process] ):
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue : deque[Process] , time_slice : int ):
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'queue': deque([P1, P2, P3, P4])})
    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 600
|
import pprint
import requests
__UpperCAmelCase = 'https://zenquotes.io/api'
def quote_of_the_day( ) -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes( ) -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
__UpperCAmelCase = random_quotes()
pprint.pprint(response)
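# Each endpoint returns a JSON list of quote objects; the keys below are assumed from
# the public ZenQuotes API and are not guaranteed by this file:
#
#     [{"q": "<quote text>", "a": "<author>", "h": "<pre-rendered HTML>"}]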
| 600
| 1
|
def all_unique_chars( input_str ) -> bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
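# Examples: every character of "abc" maps to a distinct bit, so the function returns
# True; "aba" revisits the bit for "a" and returns False.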
| 700
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig( PretrainedConfig ):
    model_type = '''xlnet'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=32_000 , d_model=1_024 , n_layer=24 , n_head=16 , d_inner=4_096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1e-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Optional[Any]:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''' , FutureWarning , )
            use_mems_eval = kwargs['''use_cache''']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ) -> int:
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
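# Instantiation sketch (all arguments default to the base values shown above):
#
#     config = XLNetConfig()
#     config.max_position_embeddings  # -> -1: XLNet has no fixed sequence length limit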
| 307
| 0
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge( datasets.Metric ):
    """simple docstring"""
    def _info( self ):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 582
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , learnable : bool , hidden_size : Optional[int] = None , length : Optional[int] = None ):
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline( DiffusionPipeline ):
    """simple docstring"""
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self , vqvae : VQModel , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , transformer : Transformer2DModel , scheduler : VQDiffusionScheduler , learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ):
        '''simple docstring'''
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [""""""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""pt""" , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , num_inference_steps : int = 1_0_0 , guidance_scale : float = 5.0 , truncation_rate : float = 1.0 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , ):
        '''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps )}.''' )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-7_0 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
    def truncate( self , log_p_x_0 : torch.FloatTensor , truncation_rate : float ):
        '''simple docstring'''
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
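# End-to-end usage sketch (illustrative; "microsoft/vq-diffusion-ithq" is the public
# VQ-Diffusion checkpoint and is assumed to be reachable from the hub):
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]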
| 582
| 1
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxBertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        model = FlaxBertModel.from_pretrained("bert-base-cased" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 704
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class GraphAdjacencyList(Generic[T] ):
    """simple docstring"""
    def __init__( self , directed : bool = True ):
        '''simple docstring'''
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T ):
        '''simple docstring'''
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self : List[str] ):
'''simple docstring'''
return pformat(self.adj_list )
| 503
| 0
|
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float] , x : float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly : Sequence[float] , x : float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
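    # Hand check for poly = (0.0, 0.0, 5.0, 9.3, 7.0) at x = 10.0:
    # 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 500 + 9300 + 70000 = 79800.0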
| 17
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_a : List[str] = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ) -> Optional[Any]:
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
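# Hedged sketch (not part of the original script): the q/k/v split above in
# isolation, on synthetic tensors, so the slicing logic can be checked without
# downloading timm weights.
def _demo_qkv_split(hidden_size: int = 4) -> None:
    qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = qkv_weight[:hidden_size, :]
    key = qkv_weight[hidden_size : 2 * hidden_size, :]
    value = qkv_weight[-hidden_size:, :]
    # the three slices tile the fused matrix exactly, in query/key/value order
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)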
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # standard COCO test image used across conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights to our ViT hybrid structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
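# Example invocation (a sketch; the script filename is assumed):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base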
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
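# Quick arithmetic check of the expected sequence length used above (a sketch,
# not part of the original tests): with the tester defaults, a 30x30 image and
# 2x2 patches give 15 * 15 = 225 patches, plus the [CLS] token and 10
# detection tokens, i.e. 236 positions.
def _expected_seq_len(image_size=(30, 30), patch_size=2, num_detection_tokens=10) -> int:
    num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
    return num_patches + 1 + num_detection_tokens  # 225 + 1 + 10 == 236 for the defaults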
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
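# Hedged sketch (not part of the tests above): the coordinate conversion that
# YOLOS post-processing performs, from normalized (center_x, center_y, width,
# height) boxes to absolute (x0, y0, x1, y1) corners for a given image size.
def _center_to_corners(box, image_width, image_height):
    center_x, center_y, width, height = box
    return (
        (center_x - width / 2) * image_width,
        (center_y - height / 2) * image_height,
        (center_x + width / 2) * image_width,
        (center_y + height / 2) * image_height,
    )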
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = XLMModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Any = model(snake_case , lengths=snake_case , langs=snake_case )
UpperCAmelCase : Any = model(snake_case , langs=snake_case )
UpperCAmelCase : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : int = XLMWithLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Tuple = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = XLMForQuestionAnsweringSimple(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[str] = model(snake_case )
UpperCAmelCase : List[str] = model(snake_case , start_positions=snake_case , end_positions=snake_case )
UpperCAmelCase : List[str] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = XLMForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Union[str, Any] = model(snake_case )
UpperCAmelCase : List[str] = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , )
UpperCAmelCase : Optional[Any] = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , )
((UpperCAmelCase) , ) : str = result_with_labels.to_tuple()
UpperCAmelCase : List[str] = model(snake_case , start_positions=snake_case , end_positions=snake_case )
((UpperCAmelCase) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = XLMForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case )
UpperCAmelCase : int = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_labels
UpperCAmelCase : Optional[int] = XLMForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_choices
UpperCAmelCase : Tuple = XLMForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
'''simple docstring'''
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_attentions in attentions] , [True] * len(snake_case ) )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case ):
# adds PAD dummy token
UpperCAmelCase : str = min_length + idx + 1
UpperCAmelCase : List[Any] = min_length + idx + 1
UpperCAmelCase : List[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case ) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
'''simple docstring'''
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_hidden_states in hidden_states] , [True] * len(snake_case ) , )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case ):
# adds PAD dummy token
UpperCAmelCase : List[Any] = min_length + idx + 1
UpperCAmelCase : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case ) , )
pass
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = XLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
def solution(limit: int = 1_00_00_00) -> int:
    """
    Return the number of reduced proper fractions with denominator at most
    ``limit``, i.e. sum(phi(d) for d in 2..limit), computed with a sieve.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update the totients of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
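# Sanity check on a small limit (a sketch, not part of the original solution):
# sum(phi(d) for d in 2..10) = 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 = 31.
assert solution(10) == 31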
if __name__ == "__main__":
    print(solution())
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # supervised_keys lost in the source; None assumed
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # supervised_keys lost in the source; None assumed
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
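# A minimal standalone pipeline using the same Beam primitive as the builders
# above (a sketch; requires apache-beam and uses the local DirectRunner):
def _demo_direct_runner():
    import apache_beam as beam

    with beam.Pipeline(runner="DirectRunner") as pipeline:
        _ = pipeline | "Load Examples" >> beam.Create(get_test_dummy_examples())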
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self) -> None:
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print the first- and second-order Shannon entropies of ``text`` and the
    difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each two-character sequence calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[Counter, Counter]:
    """
    Convert the text into two Counters: one of single characters and one of
    adjacent two-character pairs.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
    main()
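    # Worked micro-example (a sketch, not part of the original module): for
    # the text "ab", analyze_text counts one of each character plus the
    # leading-space pair, so the first-order entropy is exactly 1 bit.
    assert analyze_text("ab") == (Counter({"a": 1, "b": 1}), Counter({" a": 1, "ab": 1}))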
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
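# The block above is the standard lazy-import scaffold: under TYPE_CHECKING the
# names are imported eagerly, while at runtime the module is swapped for a
# _LazyModule that resolves attributes on first access. A minimal sketch of
# that runtime behaviour (simplified semantics, not transformers' actual
# implementation):
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, exported_names in self._import_structure.items():
            if attr in exported_names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so later lookups skip __getattr__
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")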
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2_048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44_100, hop_length_to_sampling_rate=86, n_fft=2_048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
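# A quick standalone check of the affine normalization used in
# _np_extract_fbank_features above (synthetic values, not real audio): after
# the -20 dB shift, values are divided by 40, clipped to [-2, 0] and shifted
# up by 1, which maps the spectrogram into [-1, 1].
def _demo_db_normalization() -> None:
    log_spec = np.array([-100.0, -60.0, -20.0, 20.0])
    log_spec = log_spec - 20.0
    log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
    assert log_spec.min() >= -1.0 and log_spec.max() <= 1.0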
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00_085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
def _lowercase ( self: List[Any]):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**_lowercase)
__lowerCAmelCase = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
__lowerCAmelCase = self.get_dummy_inputs(_lowercase)
__lowerCAmelCase = sd_pipe(**_lowercase).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _lowercase ( self: int):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
def _lowercase ( self: str):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
pass
def _lowercase ( self: Tuple):
'''simple docstring'''
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**_lowercase)
        __lowerCAmelCase = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
# forward without prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(_lowercase)
__lowerCAmelCase = 3 * ["""this is a negative prompt"""]
__lowerCAmelCase = negative_prompt
__lowerCAmelCase = 3 * [inputs["""prompt"""]]
__lowerCAmelCase = sd_pipe(**_lowercase)
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(_lowercase)
__lowerCAmelCase = 3 * ["""this is a negative prompt"""]
__lowerCAmelCase = 3 * [inputs.pop("""prompt""")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(_lowercase, negative_prompt=_lowercase)
__lowerCAmelCase = sd_pipe(
**_lowercase, prompt_embeds=_lowercase, negative_prompt_embeds=_lowercase, pooled_prompt_embeds=_lowercase, negative_pooled_prompt_embeds=_lowercase, )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def _lowercase ( self: Optional[int]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self: Any, _lowercase: Any, _lowercase: Optional[Any]="cpu", _lowercase: List[Any]=torch.floataa, _lowercase: Optional[Any]=0):
'''simple docstring'''
__lowerCAmelCase = torch.Generator(device=_lowercase).manual_seed(_lowercase)
__lowerCAmelCase = np.random.RandomState(_lowercase).standard_normal((1, 4, 64, 64))
__lowerCAmelCase = torch.from_numpy(_lowercase).to(device=_lowercase, dtype=_lowercase)
__lowerCAmelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
__lowerCAmelCase = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""")
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
__lowerCAmelCase = self.get_inputs(_lowercase)
__lowerCAmelCase = pipe(**_lowercase).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506])
assert np.abs(image_slice - expected_slice).max() < 7e-3
| 334
|
def UpperCAmelCase ( UpperCamelCase__ = 10_00 ) -> int:
'''simple docstring'''
__lowerCAmelCase = -1
__lowerCAmelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
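        # Derivation: from c = n - a - b and a**2 + b**2 = c**2, expanding gives
        # n**2 - 2*a*n - 2*b*n + 2*a*b = 0, hence b = (n**2 - 2*a*n) / (2*n - 2*a),
        # which is the integer division below.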
__lowerCAmelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
__lowerCAmelCase = n - a - b
if c * c == (a * a + b * b):
__lowerCAmelCase = a * b * c
if candidate >= product:
__lowerCAmelCase = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 334
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: Any=False ):
"""simple docstring"""
__lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
__lowerCAmelCase = "segformer.encoder." + key
if key.startswith("backbone" ):
__lowerCAmelCase = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__lowerCAmelCase = key[key.find("patch_embed" ) + len("patch_embed" )]
__lowerCAmelCase = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(UpperCamelCase )-1}" )
if "norm" in key:
__lowerCAmelCase = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__lowerCAmelCase = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
__lowerCAmelCase = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(UpperCamelCase )-1}" )
if "layer_norm1" in key:
__lowerCAmelCase = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
__lowerCAmelCase = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
__lowerCAmelCase = key[key.find("block" ) + len("block" )]
__lowerCAmelCase = key.replace(F"block{idx}" , F"block.{int(UpperCamelCase )-1}" )
if "attn.q" in key:
__lowerCAmelCase = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
__lowerCAmelCase = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
__lowerCAmelCase = key.replace("attn" , "attention.self" )
if "fc1" in key:
__lowerCAmelCase = key.replace("fc1" , "dense1" )
if "fc2" in key:
__lowerCAmelCase = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
__lowerCAmelCase = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
__lowerCAmelCase = key.replace("linear_fuse.conv" , "linear_fuse" )
__lowerCAmelCase = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__lowerCAmelCase = key[key.find("linear_c" ) + len("linear_c" )]
__lowerCAmelCase = key.replace(F"linear_c{idx}" , F"linear_c.{int(UpperCamelCase )-1}" )
if key.startswith("head" ):
__lowerCAmelCase = key.replace("head" , "classifier" )
__lowerCAmelCase = value
return new_state_dict
def _UpperCAmelCase ( UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
__lowerCAmelCase = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
__lowerCAmelCase = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
__lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
__lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
__lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
__lowerCAmelCase = kv_bias[
config.hidden_sizes[i] :
]
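# Note: the original SegFormer checkpoints store the key and value projections
# fused into one "kv" matrix; the slices above assign the first
# config.hidden_sizes[i] rows to the key projection and the remaining rows to
# the value projection.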
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: Optional[int] ):
"""simple docstring"""
__lowerCAmelCase = SegformerConfig()
__lowerCAmelCase = False
# set attributes based on model_name
__lowerCAmelCase = "huggingface/label-files"
if "segformer" in model_name:
__lowerCAmelCase = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
__lowerCAmelCase = 1_5_0
__lowerCAmelCase = "ade20k-id2label.json"
__lowerCAmelCase = (1, 1_5_0, 1_2_8, 1_2_8)
elif "city" in model_name:
__lowerCAmelCase = 1_9
__lowerCAmelCase = "cityscapes-id2label.json"
__lowerCAmelCase = (1, 1_9, 1_2_8, 1_2_8)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
__lowerCAmelCase = True
__lowerCAmelCase = model_name[4:6]
__lowerCAmelCase = 1_0_0_0
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = (1, 1_0_0_0)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
__lowerCAmelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) , "r" ) )
    __lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
__lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase = 2_5_6
elif size == "b2":
__lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase = 7_6_8
__lowerCAmelCase = [3, 4, 6, 3]
elif size == "b3":
__lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase = 7_6_8
__lowerCAmelCase = [3, 4, 1_8, 3]
elif size == "b4":
__lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase = 7_6_8
__lowerCAmelCase = [3, 8, 2_7, 3]
elif size == "b5":
__lowerCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase = 7_6_8
__lowerCAmelCase = [3, 6, 4_0, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
__lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
# prepare image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=UpperCamelCase , return_tensors="pt" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
__lowerCAmelCase = torch.load(UpperCamelCase , map_location=torch.device("cpu" ) )
else:
__lowerCAmelCase = torch.load(UpperCamelCase , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
__lowerCAmelCase = rename_keys(UpperCamelCase , encoder_only=UpperCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase , UpperCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
__lowerCAmelCase = False
__lowerCAmelCase = SegformerForImageClassification(UpperCamelCase )
else:
__lowerCAmelCase = SegformerForSemanticSegmentation(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# forward pass
__lowerCAmelCase = model(UpperCamelCase )
__lowerCAmelCase = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
__lowerCAmelCase = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
__lowerCAmelCase = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
__lowerCAmelCase = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
__lowerCAmelCase = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
__lowerCAmelCase = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
__lowerCAmelCase = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
__lowerCAmelCase = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
__lowerCAmelCase = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
__lowerCAmelCase = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
UpperCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 611
|
def _UpperCAmelCase ( UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
__lowerCAmelCase = _modexpt(UpperCamelCase , exponent // 2 , UpperCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase , exponent - 1 , UpperCamelCase )) % modulo_value
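# Sanity check (a sketch using the intended de-obfuscated name `_modexpt`, which
# the recursive calls above assume): the square-and-multiply recursion agrees
# with Python's built-in three-argument pow, e.g.
# _modexpt(3, 13, 10**4) == pow(3, 13, 10**4) == 4323.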
def _UpperCAmelCase ( UpperCamelCase: int = 1_7_7_7 , UpperCamelCase: int = 1_8_5_5 , UpperCamelCase: int = 8 ):
"""simple docstring"""
__lowerCAmelCase = base
for _ in range(1 , UpperCamelCase ):
__lowerCAmelCase = _modexpt(UpperCamelCase , UpperCamelCase , 1_0**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 611
| 1
|
'''simple docstring'''
from typing import List
import numpy as np
def lowerCAmelCase( a__ : dict ):
'''simple docstring'''
lowerCamelCase__ = {key: len(a__ ) for key, value in gen_kwargs.items() if isinstance(a__ , a__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
lowerCamelCase__ = max(lists_lengths.values() , default=0 )
return max(1 , a__ )
def lowerCAmelCase( a__ : int , a__ : int ):
'''simple docstring'''
lowerCamelCase__ = []
for group_idx in range(a__ ):
lowerCamelCase__ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowerCamelCase__ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowerCamelCase__ = range(a__ , start + num_shards_to_add )
shards_indices_per_group.append(a__ )
return shards_indices_per_group
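# Worked example: _distribute_shards(num_shards=5, max_num_jobs=3) returns
# [range(0, 2), range(2, 4), range(4, 5)] -- the first num_shards % max_num_jobs
# groups each receive one extra shard, so the split is as even as possible.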
def lowerCAmelCase( a__ : dict , a__ : int ):
'''simple docstring'''
lowerCamelCase__ = _number_of_shards_in_gen_kwargs(a__ )
if num_shards == 1:
return [dict(a__ )]
else:
lowerCamelCase__ = _distribute_shards(num_shards=a__ , max_num_jobs=a__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(a__ , a__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(a__ ) )
]
def lowerCAmelCase( a__ : List[dict] ):
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , a__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowerCAmelCase( a__ : np.random.Generator , a__ : dict ):
'''simple docstring'''
lowerCamelCase__ = {len(a__ ) for value in gen_kwargs.values() if isinstance(a__ , a__ )}
lowerCamelCase__ = {}
for size in list_sizes:
lowerCamelCase__ = list(range(a__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowerCamelCase__ = dict(a__ )
for key, value in shuffled_kwargs.items():
if isinstance(a__ , a__ ):
lowerCamelCase__ = [value[i] for i in indices_per_size[len(a__ )]]
return shuffled_kwargs
| 707
|
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( A__ ):
"""simple docstring"""
__lowerCAmelCase : List[Any] =ComputeEnvironment.AMAZON_SAGEMAKER
__lowerCAmelCase : Optional[Any] =True
__lowerCAmelCase : Tuple ='''ml.p3.2xlarge'''
__lowerCAmelCase : Optional[Any] ='''accelerate_sagemaker_execution_role'''
__lowerCAmelCase : List[Any] ='''hf-sm'''
__lowerCAmelCase : Dict ='''us-east-1'''
__lowerCAmelCase : Optional[Any] =1
__lowerCAmelCase : Optional[int] ='''accelerate-sagemaker-1'''
__lowerCAmelCase : List[str] ='''1.6'''
__lowerCAmelCase : Any ='''4.4'''
__lowerCAmelCase : Optional[int] ='''train.py'''
__lowerCAmelCase : List[Any] =[
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
__lowerCAmelCase : str =[
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self):
# If no defaults are changed, `to_kwargs` returns an empty dict.
lowerCamelCase__ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args["model_name_or_path"] , UpperCamelCase)
assert isinstance(converted_args["do_train"] , UpperCamelCase)
assert isinstance(converted_args["epochs"] , UpperCamelCase)
assert isinstance(converted_args["learning_rate"] , UpperCamelCase)
assert isinstance(converted_args["max_steps"] , UpperCamelCase)
        with pytest.raises(ValueError):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 426
| 0
|
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=1_3 , __lowerCamelCase : str=7 , __lowerCamelCase : int=True , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : str=True , __lowerCamelCase : int=9_9 , __lowerCamelCase : str=6_4 , __lowerCamelCase : str=5 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Any=6_4 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Optional[Any]=5_1_2 , __lowerCamelCase : Optional[int]=1_6 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : str=3 , __lowerCamelCase : int=4 , __lowerCamelCase : Optional[Any]=None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MPNetModel(config=_A )
model.to(_A )
model.eval()
_SCREAMING_SNAKE_CASE = model(_A , _A )
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MPNetForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = MPNetForSequenceClassification(_A )
model.to(_A )
model.eval()
_SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = MPNetForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = model(
_A , attention_mask=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = MPNetForTokenClassification(config=_A )
model.to(_A )
model.eval()
_SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
_SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = True
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MPNetModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A , hidden_size=3_7 )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_A )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_A )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_A )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_A )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_A )
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MPNetModel.from_pretrained("microsoft/mpnet-base" )
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_SCREAMING_SNAKE_CASE = model(_A )[0]
_SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _A )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 418
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A=3 ,_A=3 ,_A=("DownEncoderBlock2D",) ,_A=(64,) ,_A=2 ,_A=32 ,_A="silu" ,_A=True ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : str = layers_per_block
_lowerCAmelCase : Optional[Any] = torch.nn.Convad(
_A ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_lowerCAmelCase : Any = None
_lowerCAmelCase : Tuple = nn.ModuleList([] )
# down
_lowerCAmelCase : List[Any] = block_out_channels[0]
for i, down_block_type in enumerate(_A ):
_lowerCAmelCase : str = output_channel
_lowerCAmelCase : Union[str, Any] = block_out_channels[i]
_lowerCAmelCase : List[str] = i == len(_A ) - 1
_lowerCAmelCase : Dict = get_down_block(
_A ,num_layers=self.layers_per_block ,in_channels=_A ,out_channels=_A ,add_downsample=not is_final_block ,resnet_eps=1E-6 ,downsample_padding=0 ,resnet_act_fn=_A ,resnet_groups=_A ,attention_head_dim=_A ,temb_channels=_A ,)
self.down_blocks.append(_A )
# mid
_lowerCAmelCase : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=_A ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=_A ,temb_channels=_A ,)
# out
_lowerCAmelCase : str = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=_A ,eps=1E-6 )
_lowerCAmelCase : Any = nn.SiLU()
_lowerCAmelCase : List[str] = 2 * out_channels if double_z else out_channels
_lowerCAmelCase : List[str] = nn.Convad(block_out_channels[-1] ,_A ,3 ,padding=1 )
_lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = x
_lowerCAmelCase : List[Any] = self.conv_in(_A )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_A ):
def custom_forward(*_A ):
return module(*_A )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
_lowerCAmelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(_A ) ,_A ,use_reentrant=_A )
# middle
_lowerCAmelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_A ,use_reentrant=_A )
else:
for down_block in self.down_blocks:
_lowerCAmelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(_A ) ,_A )
# middle
_lowerCAmelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,_A )
else:
# down
for down_block in self.down_blocks:
_lowerCAmelCase : Optional[int] = down_block(_A )
# middle
_lowerCAmelCase : int = self.mid_block(_A )
# post-process
_lowerCAmelCase : str = self.conv_norm_out(_A )
_lowerCAmelCase : str = self.conv_act(_A )
_lowerCAmelCase : List[Any] = self.conv_out(_A )
return sample
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A=3 ,_A=3 ,_A=("UpDecoderBlock2D",) ,_A=(64,) ,_A=2 ,_A=32 ,_A="silu" ,_A="group" ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = layers_per_block
_lowerCAmelCase : Dict = nn.Convad(
_A ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Dict = nn.ModuleList([] )
_lowerCAmelCase : List[str] = in_channels if norm_type == 'spatial' else None
# mid
_lowerCAmelCase : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=_A ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=_A ,temb_channels=_A ,)
# up
_lowerCAmelCase : Optional[Any] = list(reversed(_A ) )
_lowerCAmelCase : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_A ):
_lowerCAmelCase : List[Any] = output_channel
_lowerCAmelCase : Any = reversed_block_out_channels[i]
_lowerCAmelCase : Optional[int] = i == len(_A ) - 1
_lowerCAmelCase : Union[str, Any] = get_up_block(
_A ,num_layers=self.layers_per_block + 1 ,in_channels=_A ,out_channels=_A ,prev_output_channel=_A ,add_upsample=not is_final_block ,resnet_eps=1E-6 ,resnet_act_fn=_A ,resnet_groups=_A ,attention_head_dim=_A ,temb_channels=_A ,resnet_time_scale_shift=_A ,)
self.up_blocks.append(_A )
_lowerCAmelCase : str = output_channel
# out
if norm_type == "spatial":
_lowerCAmelCase : Tuple = SpatialNorm(block_out_channels[0] ,_A )
else:
_lowerCAmelCase : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=_A ,eps=1E-6 )
_lowerCAmelCase : Any = nn.SiLU()
_lowerCAmelCase : Any = nn.Convad(block_out_channels[0] ,_A ,3 ,padding=1 )
_lowerCAmelCase : Union[str, Any] = False
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = z
_lowerCAmelCase : str = self.conv_in(_A )
_lowerCAmelCase : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_A ):
def custom_forward(*_A ):
return module(*_A )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
_lowerCAmelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_A ,_A ,use_reentrant=_A )
_lowerCAmelCase : str = sample.to(_A )
# up
for up_block in self.up_blocks:
_lowerCAmelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(_A ) ,_A ,_A ,use_reentrant=_A )
else:
# middle
_lowerCAmelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,_A ,_A )
_lowerCAmelCase : str = sample.to(_A )
# up
for up_block in self.up_blocks:
_lowerCAmelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(_A ) ,_A ,_A )
else:
# middle
_lowerCAmelCase : Optional[int] = self.mid_block(_A ,_A )
_lowerCAmelCase : Union[str, Any] = sample.to(_A )
# up
for up_block in self.up_blocks:
_lowerCAmelCase : Union[str, Any] = up_block(_A ,_A )
# post-process
if latent_embeds is None:
_lowerCAmelCase : List[Any] = self.conv_norm_out(_A )
else:
_lowerCAmelCase : Any = self.conv_norm_out(_A ,_A )
_lowerCAmelCase : int = self.conv_act(_A )
_lowerCAmelCase : List[Any] = self.conv_out(_A )
return sample
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A=None ,_A="random" ,_A=False ,_A=True ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = n_e
_lowerCAmelCase : int = vq_embed_dim
_lowerCAmelCase : List[Any] = beta
_lowerCAmelCase : List[Any] = legacy
_lowerCAmelCase : Union[str, Any] = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
_lowerCAmelCase : int = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
_lowerCAmelCase : Any = self.used.shape[0]
_lowerCAmelCase : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_lowerCAmelCase : Any = self.re_embed
_lowerCAmelCase : Tuple = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
_lowerCAmelCase : str = n_e
_lowerCAmelCase : List[str] = sane_index_shape
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = inds.shape
assert len(_A ) > 1
_lowerCAmelCase : List[Any] = inds.reshape(ishape[0] ,-1 )
_lowerCAmelCase : Tuple = self.used.to(_A )
_lowerCAmelCase : Any = (inds[:, :, None] == used[None, None, ...]).long()
_lowerCAmelCase : Tuple = match.argmax(-1 )
_lowerCAmelCase : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
_lowerCAmelCase : int = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
_lowerCAmelCase : Dict = self.unknown_index
return new.reshape(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = inds.shape
assert len(_A ) > 1
_lowerCAmelCase : int = inds.reshape(ishape[0] ,-1 )
_lowerCAmelCase : Dict = self.used.to(_A )
if self.re_embed > self.used.shape[0]: # extra token
            inds[inds >= self.used.shape[0]] = 0 # simply set to zero
_lowerCAmelCase : List[str] = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,_A )
return back.reshape(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = z.permute(0 ,2 ,3 ,1 ).contiguous()
_lowerCAmelCase : int = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_lowerCAmelCase : List[str] = torch.argmin(torch.cdist(_A ,self.embedding.weight ) ,dim=1 )
_lowerCAmelCase : Optional[int] = self.embedding(_A ).view(z.shape )
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Tuple = None
# compute loss for embedding
if not self.legacy:
_lowerCAmelCase : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_lowerCAmelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_lowerCAmelCase : Optional[int] = z + (z_q - z).detach()
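        # z + (z_q - z).detach() equals z_q in the forward pass but carries the
        # gradient of z: the straight-through estimator that lets gradients
        # bypass the non-differentiable argmin codebook lookup above.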
# reshape back to match original input shape
_lowerCAmelCase : Optional[int] = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
_lowerCAmelCase : Any = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
_lowerCAmelCase : Optional[int] = self.remap_to_used(_A )
_lowerCAmelCase : List[Any] = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
_lowerCAmelCase : List[str] = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if self.remap is not None:
_lowerCAmelCase : List[str] = indices.reshape(shape[0] ,-1 ) # add batch axis
_lowerCAmelCase : List[str] = self.unmap_to_all(_A )
_lowerCAmelCase : List[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_lowerCAmelCase : Tuple = self.embedding(_A )
if shape is not None:
_lowerCAmelCase : List[Any] = z_q.view(_A )
# reshape back to match original input shape
_lowerCAmelCase : List[Any] = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A=False ):
'''simple docstring'''
_lowerCAmelCase : int = parameters
_lowerCAmelCase, _lowerCAmelCase : List[str] = torch.chunk(_A ,2 ,dim=1 )
_lowerCAmelCase : Optional[Any] = torch.clamp(self.logvar ,-3_0.0 ,2_0.0 )
_lowerCAmelCase : Optional[int] = deterministic
_lowerCAmelCase : int = torch.exp(0.5 * self.logvar )
_lowerCAmelCase : Optional[Any] = torch.exp(self.logvar )
if self.deterministic:
_lowerCAmelCase : Optional[int] = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def __lowerCamelCase ( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : int = randn_tensor(
self.mean.shape ,generator=_A ,device=self.parameters.device ,dtype=self.parameters.dtype )
_lowerCAmelCase : Any = self.mean + self.std * sample
return x
def __lowerCamelCase ( self ,_A=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def __lowerCamelCase ( self ,_A ,_A=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
_lowerCAmelCase : Optional[int] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=_A )
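    # The expression above is the diagonal Gaussian negative log-likelihood,
    # 0.5 * sum(log(2*pi) + log(var) + (sample - mean)**2 / var), reduced over `dims`.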
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.mean
| 259
| 0
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __A :
"""simple docstring"""
def __init__( self )-> Optional[Any]:
lowercase__ = ''''''
lowercase__ = ''''''
lowercase__ = []
lowercase__ = 0
lowercase__ = 2_5_6
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
def snake_case_( self , _lowerCamelCase )-> List[Any]:
lowercase__ = cva.imread(_lowerCamelCase , 0 )
lowercase__ = copy.deepcopy(self.img )
lowercase__ , lowercase__ , lowercase__ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label='''x''' )
        lowercase__ = np.sum(x )
        for i in range(len(x ) ):
lowercase__ = x[i] / self.k
self.sk += prk
lowercase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowercase__ = int(last % last )
lowercase__ = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
lowercase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowercase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowercase__ = self.img[j][i]
if num != self.last_list[num]:
lowercase__ = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def snake_case_( self )-> Any:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def snake_case_( self )-> Optional[int]:
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _lowerCAmelCase = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
_lowerCAmelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 318
|
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( lowercase : int ) ->bool:
"""simple docstring"""
lowercase__ = str(lowercase )
return len(lowercase ) == 9 and set(lowercase ) == set('''123456789''' )
def _lowerCAmelCase ( ) ->int | None:
"""simple docstring"""
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
lowercase__ = 1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
lowercase__ = 1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase ):
return candidate
return None
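# Note: the magic multipliers come from digit concatenation. For a 4-digit base
# n (so 2*n has 5 digits), the concatenated product n || 2n equals
# n * 10**5 + 2 * n = 100002 * n; for a 3-digit base n, n || 2n || 3n equals
# n * 10**6 + 2 * n * 10**3 + 3 * n = 1002003 * n.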
if __name__ == "__main__":
print(f'''{solution() = }''')
| 318
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 75
|
import math
import os
import sys
def __lowercase ( lowerCamelCase : str ):
UpperCamelCase_ : Dict = ''
try:
with open(lowerCamelCase , 'rb' ) as binary_file:
UpperCamelCase_ : Union[str, Any] = binary_file.read()
for dat in data:
UpperCamelCase_ : Optional[int] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def __lowercase ( lowerCamelCase : dict[str, str] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : str ):
lexicon.pop(lowerCamelCase )
UpperCamelCase_ : Optional[int] = last_match_id
if math.loga(lowerCamelCase ).is_integer():
for curr_key in lexicon:
UpperCamelCase_ : Optional[int] = '0' + lexicon[curr_key]
UpperCamelCase_ : List[str] = bin(lowerCamelCase )[2:]
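# Note (assuming the log2 check is applied to the running `index`, as in the
# reference LZW implementation): once the lexicon size reaches a power of two,
# every code needs one more bit, so the loop above left-pads all existing codes
# with a leading '0'.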
def __lowercase ( lowerCamelCase : str ):
UpperCamelCase_ : List[str] = {'0': '0', '1': '1'}
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = '', ''
    UpperCamelCase_ : List[str] = len(lexicon )
for i in range(len(lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase_ : Any = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
index += 1
UpperCamelCase_ : Optional[int] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCamelCase_ : Any = lexicon[curr_string]
result += last_match_id
return result
def __lowercase ( lowerCamelCase : str , lowerCamelCase : str ):
UpperCamelCase_ : Union[str, Any] = os.path.getsize(lowerCamelCase )
UpperCamelCase_ : List[str] = bin(lowerCamelCase )[2:]
UpperCamelCase_ : int = len(lowerCamelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def __lowercase ( lowerCamelCase : str , lowerCamelCase : str ):
UpperCamelCase_ : Optional[int] = 8
try:
with open(lowerCamelCase , 'wb' ) as opened_file:
UpperCamelCase_ : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(lowerCamelCase ) , lowerCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(lowerCamelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def __lowercase ( lowerCamelCase : str , lowerCamelCase : str ):
UpperCamelCase_ : Dict = read_file_binary(lowerCamelCase )
UpperCamelCase_ : Optional[int] = compress_data(lowerCamelCase )
UpperCamelCase_ : Dict = add_file_length(lowerCamelCase , lowerCamelCase )
write_file_binary(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 417
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
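        # Integration check: a seeded run of microsoft/vq-diffusion-ithq must
        # stay close to a reference image stored on the Hub.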
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 721
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
    def setUp(self):
        # Write a tiny BPE vocab/merges pair and a CLIP image-processor config into a temp dir.
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        # One random channels-first uint8 image, converted to channels-last PIL.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 6
| 0
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    # Compile each query regex, anchored with "$" so it must match a whole path segment,
    # then slide the query window over the key tuple.
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , lowerCAmelCase_ )),
(("transformer", "wte", "embedding"), P("""mp""" , lowerCAmelCase_ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase_ , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , lowerCAmelCase_ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCAmelCase_ , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , lowerCAmelCase_ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every flattened parameter path at the _unmatched sentinel, then apply the rules.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
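# Hypothetical usage sketch (names assumed, not from this file): spec = set_partitions(model.params)
# yields a FrozenDict of PartitionSpecs mirroring the parameter tree, suitable for pjit sharding
# along the "mp" mesh axis.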
| 414
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ : Dict = False
class __lowerCAmelCase ( unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
@property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
@property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
_UpperCAmelCase : List[str] = """cpu"""
_UpperCAmelCase : Any = self.dummy_vqvae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Tuple = self.dummy_tokenizer
_UpperCAmelCase : List[str] = self.dummy_transformer
_UpperCAmelCase : Tuple = VQDiffusionScheduler(self.num_embed )
_UpperCAmelCase : int = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = VQDiffusionPipeline(
vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
_UpperCAmelCase : List[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : int = """teddy bear playing in the pool"""
_UpperCAmelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" )
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = pipe(
[prompt] , generator=lowerCAmelCase__ , output_type="""np""" , return_dict=lowerCAmelCase__ , num_inference_steps=2 )[0]
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
_UpperCAmelCase : str = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
_UpperCAmelCase : Optional[Any] = """cpu"""
_UpperCAmelCase : Tuple = self.dummy_vqvae
_UpperCAmelCase : Dict = self.dummy_text_encoder
_UpperCAmelCase : int = self.dummy_tokenizer
_UpperCAmelCase : Any = self.dummy_transformer
_UpperCAmelCase : List[str] = VQDiffusionScheduler(self.num_embed )
_UpperCAmelCase : str = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCAmelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_UpperCAmelCase : Tuple = VQDiffusionPipeline(
vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
_UpperCAmelCase : List[str] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = """teddy bear playing in the pool"""
_UpperCAmelCase : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Dict = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe(
[prompt] , generator=lowerCAmelCase__ , output_type="""np""" , return_dict=lowerCAmelCase__ , num_inference_steps=2 )[0]
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
_UpperCAmelCase : List[str] = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
_UpperCAmelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
_UpperCAmelCase : List[Any] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
_UpperCAmelCase : Union[str, Any] = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_UpperCAmelCase : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=lowerCAmelCase__ , output_type="""np""" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 414
| 1
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
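# CLI script: convert a TensorFlow BigBird checkpoint into a PyTorch checkpoint,
# optionally with the TriviaQA question-answering head.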
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """simple docstring"""
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 717
|
'''simple docstring'''
from __future__ import annotations
import math
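# Game-tree minimax over a perfect binary tree: the maximizing and minimizing
# players alternate, so is_max flips on every recursive call; leaves hold the scores.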
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 92
| 0
|
'''simple docstring'''
import math
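# Plan: sieve the primes up to sqrt(limit) + 100, then walk consecutive prime pairs (p, q)
# and accumulate the values <= limit between p**2 and q**2 that are divisible by p or q,
# subtracting anything divisible by both so it is not counted twice.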
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999966663333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 127
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        # If the serialized normalizer disagrees with the requested options, rebuild it.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 205
| 0
|
import os
import numpy
import onnx
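# Shrink an ONNX model by removing duplicate initializer tensors and rewiring
# every consumer node to point at the single surviving copy.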
def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names, then restore the names.
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
| 713
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
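# Evaluation script: transcribe a dataset split with the ASR pipeline,
# normalize the target text, and report WER/CER (optionally logging predictions).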
def log_results(result, args):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str)
    with open(f'''{dataset_id}_eval_results.txt''', 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'''log_{dataset_id}_predictions.txt'''
        target_file = f'''log_{dataset_id}_targets.txt'''
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'''{i}''' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'''{i}''' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
| 52
| 0
|
def climb_stairs(number_of_steps: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
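# Example: number_of_steps = 3 gives 3 distinct climbs (1+1+1, 1+2, 2+1);
# the loop is simply an iterative Fibonacci recurrence.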
if __name__ == "__main__":
import doctest
doctest.testmod()
| 445
|
def actual_power(a: int, b: int):
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int):
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
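# Exponentiation by squaring: O(log b) recursion depth; power() handles
# negative exponents by returning the reciprocal.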
if __name__ == "__main__":
print(power(-2, -3))
| 445
| 1
|
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
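# Convert a T5X/flax SwitchTransformers checkpoint: stream tensors out of TensorStore,
# rename keys to the PyTorch convention, and shard them into indexed .bin files.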
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    '''simple docstring'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    '''simple docstring'''
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    '''simple docstring'''
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    '''simple docstring'''
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    '''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 245
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
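# Flax building blocks for the diffusers UNet: nearest-neighbor upsampling,
# strided-conv downsampling, and a time-conditioned ResNet block.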
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbor 2x upsampling in both spatial dims, then a 3x3 conv.
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest", )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        # Project the (swished) timestep embedding and broadcast it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
| 245
| 1
|
from __future__ import annotations
import math
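# Sieve of Eratosthenes: marking starts at start*start, so the overall cost
# is O(n log log n); primes above sqrt(num) are collected in a final pass.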
def prime_sieve(num: int) -> list:
    """simple docstring"""
    if num <= 0:
        message = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 197
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
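# Smoke tests for finetune_rag: write a tiny parallel dataset, launch the training
# script in a subprocess, and assert a minimum exact-match score on the test split.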
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"""{split}.{field}"""), "w") as f:
                    f.write(content)
def _UpperCAmelCase ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str = "pytorch" ):
__a = self.get_auto_remove_tmp_dir()
__a = os.path.join(__SCREAMING_SNAKE_CASE , "output" )
__a = os.path.join(__SCREAMING_SNAKE_CASE , "data" )
self._create_dummy_data(data_dir=__SCREAMING_SNAKE_CASE )
__a = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 197
| 1
|
def factorial(num: int):
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100):
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 715
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
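# Batch gradient descent on the linear hypothesis h(x) = p0 + p1*x1 + p2*x2 + p3*x3,
# fitted to the (input, output) pairs above; index -1 updates the bias term p0.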
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ):
return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output(
__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : Any ):
lowerCamelCase__ = 0
for i in range(len(__lowerCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ):
lowerCamelCase__ = 0
for i in range(__lowerCAmelCase ):
if index == -1:
summation_value += _error(__lowerCAmelCase )
else:
summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index]
return summation_value
def A__ ( __lowerCAmelCase : List[Any] ):
lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m
return cost_derivative_value
def A__ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase__ = 0.00_0002
lowerCamelCase__ = 0
lowerCamelCase__ = 0
while True:
j += 1
lowerCamelCase__ = [0, 0, 0, 0]
for i in range(0 , len(__lowerCAmelCase ) ):
lowerCamelCase__ = get_cost_derivative(i - 1 )
lowerCamelCase__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ):
break
lowerCamelCase__ = temp_parameter_vector
print(("""Number of iterations:""", j) )
def A__ ( ):
for i in range(len(__lowerCAmelCase ) ):
print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
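# Illustrative sketch (an addition for clarity, not part of the original file):
# the hypothesis above is h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3,
# so _hypothesis_value can be expressed as a single numpy dot product.
def _hypothesis_value_vectorized(data_input_tuple):
    return numpy.dot(data_input_tuple, parameter_vector[1:]) + parameter_vector[0]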
| 9
| 0
|
import math
def res( x : int , y : int ):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('''This should never happen''' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    xa, ya = map(int, input(prompt).split(','))
    xb, yb = map(int, input(prompt).split(','))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print('Largest number is', xa, '^', ya)
    elif resb > resa:
        print('Largest number is', xb, '^', yb)
    else:
        print('Both are equal')
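# Illustrative check (an addition for clarity, not part of the original file):
# res(2, 10) = 10*log10(2) ≈ 3.01 exceeds res(10, 3) = 3, i.e. 2**10 > 10**3.
assert res(2, 10) > res(10, 3)  # 1024 > 1000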
| 64
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "bert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Dict = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : Any = intermediate_size
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : str = type_vocab_size
__snake_case : Any = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : List[Any] = position_embedding_type
__snake_case : Dict = use_cache
__snake_case : str = classifier_dropout
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
@property
def lowercase_ ( self ):
if self.task == "multiple-choice":
__snake_case : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 576
| 0
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num : float ) -> float:
    if num <= 0:
        raise ValueError("""math domain error""" )
    return quad(integrand , 0 , inf , args=(num) )[0]


def integrand( x : float , z : float ) -> float:
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
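# Illustrative check (an addition for clarity, not part of the original file):
# for positive integers, Gamma(n) == (n-1)!, so gamma(5) should be close to 24.
assert abs(gamma(5) - 24.0) < 1e-4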
| 218
|
def solution( max_perimeter : int = 10**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
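# Illustrative check (an addition for clarity, not part of the original file):
# the recurrence enumerates almost-equilateral Heronian triangles; the first two
# perimeters it generates are 16 (sides 5,5,6) and 50 (sides 17,17,16).
assert solution(66) == 66  # 16 + 50, the first two perimeters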
| 218
| 1
|
"""simple docstring"""
from __future__ import annotations
def shear_stress( stress , tangential_force , area , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
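# Illustrative usage (an addition for clarity, not part of the original file):
# with tangential_force = 25 N over area = 100 m^2, stress = F / A = 0.25 Pa.
assert shear_stress(stress=0, tangential_force=25, area=100) == ("stress", 0.25)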
| 65
|
"""simple docstring"""
def remove_digit( num : int ) -> int:
    '''simple docstring'''
    if not isinstance(num , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
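# Illustrative check (an addition for clarity, not part of the original file):
# removing one digit from 152 yields 52, 12 or 15, so remove_digit(152) == 52.
assert remove_digit(152) == 52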
| 595
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __magic_name__ ( __a ):
"""simple docstring"""
lowerCAmelCase : Tuple = '''convbert'''
def __init__( self : str , _lowercase : Dict=30_522 , _lowercase : List[Any]=768 , _lowercase : Any=12 , _lowercase : Optional[int]=12 , _lowercase : Any=3_072 , _lowercase : Union[str, Any]="gelu" , _lowercase : List[str]=0.1 , _lowercase : List[str]=0.1 , _lowercase : List[str]=512 , _lowercase : str=2 , _lowercase : List[str]=0.02 , _lowercase : Optional[int]=1E-12 , _lowercase : Any=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : int=768 , _lowercase : List[str]=2 , _lowercase : Optional[int]=9 , _lowercase : Optional[int]=1 , _lowercase : List[Any]=None , **_lowercase : Tuple , ):
"""simple docstring"""
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
_UpperCamelCase: List[Any] = vocab_size
_UpperCamelCase: Union[str, Any] = hidden_size
_UpperCamelCase: List[str] = num_hidden_layers
_UpperCamelCase: Tuple = num_attention_heads
_UpperCamelCase: Optional[int] = intermediate_size
_UpperCamelCase: Dict = hidden_act
_UpperCamelCase: List[Any] = hidden_dropout_prob
_UpperCamelCase: Optional[Any] = attention_probs_dropout_prob
_UpperCamelCase: Tuple = max_position_embeddings
_UpperCamelCase: int = type_vocab_size
_UpperCamelCase: Dict = initializer_range
_UpperCamelCase: Tuple = layer_norm_eps
_UpperCamelCase: Optional[int] = embedding_size
_UpperCamelCase: Dict = head_ratio
_UpperCamelCase: List[str] = conv_kernel_size
_UpperCamelCase: str = num_groups
_UpperCamelCase: Dict = classifier_dropout
class __magic_name__ ( __a ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : int ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCamelCase: List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase: str = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 264
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )


def _ask_options( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result


def _convert_compute_environment( value ):
    value = int(value )
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )


def _convert_distributed_mode( value ):
    value = int(value )
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )


def _convert_dynamo_backend( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value


def _convert_mixed_precision( value ):
    value = int(value )
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )


def _convert_sagemaker_distributed_mode( value ):
    value = int(value )
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )


def _convert_yes_no_to_bool( value ):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter( argparse.RawDescriptionHelpFormatter ):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace('''<command> [<args>] ''' , '''''' )
        return usage
| 264
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 221
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 ):
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
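# Illustrative sketch (an addition for clarity, not part of the original file):
# eval() on an arbitrary string is unsafe; sympy can build the same iteration
# without it. `newton_raphson_sympy` below is a hypothetical safer variant.
def newton_raphson_sympy(expr: str, a, precision: float = 10**-10) -> float:
    from sympy import Symbol, lambdify, sympify

    x_sym = Symbol("x")
    f = sympify(expr)
    f_num = lambdify(x_sym, f)            # callable f(x)
    f_prime = lambdify(x_sym, diff(f, x_sym))  # callable f'(x)
    x = float(a)
    while abs(f_num(x)) >= precision:
        x = x - f_num(x) / f_prime(x)
    return x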
| 687
| 0
|
"""simple docstring"""
def count_inversions_bf( arr ):
    """simple docstring"""
    num_inversions = 0
    n = len(arr )

    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive( arr ):
    """simple docstring"""
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p )
    b, inversions_q = count_inversions_recursive(q )
    c, cross_inversions = _count_cross_inversions(a , b )

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions( p , q ):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1

    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )

    return r, num_inversion


def main():
    """simple docstring"""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , num_inversions_bf )

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )
if __name__ == "__main__":
main()
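# Illustrative check (an addition for clarity, not part of the original file):
# [3, 2, 1] has exactly 3 inversions: (3,2), (3,1) and (2,1).
assert count_inversions_bf([3, 2, 1]) == 3
assert count_inversions_recursive([3, 2, 1])[1] == 3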
| 712
|
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path , tgt_path , save_path=None , **kwargs ):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
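# Illustrative CLI usage (an addition for clarity; the file names below are
# hypothetical). fire maps positional and flag arguments onto the signature of
# calculate_rouge_path:
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json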
| 121
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 587
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """camembert"""
def __init__( self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1e-1_2 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , **A , ) -> Any:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
snake_case : int = vocab_size
snake_case : Dict = hidden_size
snake_case : Any = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : Any = hidden_act
snake_case : List[str] = intermediate_size
snake_case : List[str] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Dict = max_position_embeddings
snake_case : str = type_vocab_size
snake_case : List[Any] = initializer_range
snake_case : int = layer_norm_eps
snake_case : Optional[int] = position_embedding_type
snake_case : Tuple = use_cache
snake_case : Tuple = classifier_dropout
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 587
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_text_model"
def __init__( self , A_=3_0524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1e-12 , A_=0.0 , A_=0.0 , A_=0.0_2 , A_=3_0522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> str:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = encoder_hidden_size
lowerCAmelCase = intermediate_size
lowerCAmelCase = projection_dim
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = hidden_act
lowerCAmelCase = initializer_range
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = is_decoder
lowerCAmelCase = use_cache
@classmethod
def __snake_case ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
lowerCAmelCase, lowerCAmelCase = cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
lowerCAmelCase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class BlipVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1e-5 , A_=0.0 , A_=1e-10 , **A_ , ) -> Dict:
super().__init__(**A_ )
lowerCAmelCase = hidden_size
lowerCAmelCase = intermediate_size
lowerCAmelCase = projection_dim
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = patch_size
lowerCAmelCase = image_size
lowerCAmelCase = initializer_range
lowerCAmelCase = attention_dropout
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = hidden_act
@classmethod
def __snake_case ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
lowerCAmelCase, lowerCAmelCase = cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
lowerCAmelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class BlipConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip"
    is_composition = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6_5_9_2 , A_=256 , **A_ , ) -> int:
super().__init__(**A_ )
if text_config is None:
lowerCAmelCase = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
lowerCAmelCase = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
lowerCAmelCase = BlipTextConfig(**A_ )
lowerCAmelCase = BlipVisionConfig(**A_ )
lowerCAmelCase = self.vision_config.hidden_size
lowerCAmelCase = projection_dim
lowerCAmelCase = logit_scale_init_value
lowerCAmelCase = 1.0
lowerCAmelCase = 0.0_2
lowerCAmelCase = image_text_hidden_size
@classmethod
def __snake_case ( cls , A_ , A_ , **A_ ) -> Tuple:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.text_config.to_dict()
lowerCAmelCase = self.vision_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output
| 719
|
'''simple docstring'''
def mf_knapsack( i : int , wt : list , val : list , j : int ):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) ,
                mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]


def knapsack( w : int , wt : list , val : list , n : int ):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1 )]

    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution( w : int , wt : list , val : list ):
    """simple docstring"""
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )

    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            f'But got {num_items} weights and {len(val )} values'
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                f'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(msg )

    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set


def _construct_solution( dp : list , wt : list , i : int , j : int , optimal_set : set ):
    """simple docstring"""
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
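    # Illustrative check (an addition for clarity, not part of the original file):
    # the bottom-up table solves a fresh instance too, e.g. capacity 5 with
    # weights [2, 3] and values [3, 4] takes both items for a value of 7.
    assert knapsack(5, [2, 3], [3, 4], 2)[0] == 7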
| 344
| 0
|
'''simple docstring'''
import math
def insertion_sort( array , start = 0 , end = 0 ) -> list:
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify( array , index , heap_size ) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )


def heap_sort( array ) -> list:
    n = len(array )

    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )

    for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array , 0 , i )

    return array


def median_of_3( array , first_index , middle_index , last_index ) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition( array , low , high , pivot ) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort( array ) -> list:
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )


def intro_sort( array , start , end , size_threshold , max_depth ) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
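# Illustrative check (an addition for clarity, not part of the original file):
# sort() uses insertion sort for short inputs (<= 16 elements) and falls back
# to heap_sort once the quicksort recursion-depth budget is exhausted.
assert sort([4, 1, 3, 2]) == [1, 2, 3, 4]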
| 546
|
'''simple docstring'''
def combination_sum_iv( n , array , target ) -> int:
    def count_of_possible_combinations( target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )

    return count_of_possible_combinations(target )


def combination_sum_iv_dp_array( n , array , target ) -> int:
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )


def combination_sum_iv_bottom_up( n , array , target ) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
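    # Illustrative trace (an addition for clarity, not part of the original
    # file): for array [1, 2, 5] and target 5 the bottom-up table fills as
    # dp = [1, 1, 2, 3, 5, 9], so all three variants return 9.
    assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9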
| 546
| 1
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[torch.Generator] = None , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image ), "This is a local test"
| 230
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 230
| 1
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any=7 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Optional[int]=30 , lowerCAmelCase : Dict=400 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : int=None , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=1 / 255 , lowerCAmelCase : Union[str, Any]=True , )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean
UpperCAmelCase = image_std
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_pad
def a__( self : List[str] )-> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : int=False )-> List[Any]:
"""simple docstring"""
if not batched:
UpperCAmelCase = image_inputs[0]
if isinstance(a_ , Image.Image ):
UpperCAmelCase , UpperCAmelCase = image.size
else:
UpperCAmelCase , UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase = self.size['''shortest_edge''']
UpperCAmelCase = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase = self.size['''shortest_edge''']
UpperCAmelCase = self.size['''shortest_edge''']
else:
UpperCAmelCase = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase = max(a_ , key=lambda lowerCAmelCase : item[0] )[0]
UpperCAmelCase = max(a_ , key=lambda lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Tuple = DetaImageProcessor if is_vision_available() else None
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = DetaImageProcessingTester(self )
@property
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__( self : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''image_mean''' ) )
self.assertTrue(hasattr(a_ , '''image_std''' ) )
self.assertTrue(hasattr(a_ , '''do_normalize''' ) )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''do_rescale''' ) )
self.assertTrue(hasattr(a_ , '''do_pad''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
def a__( self : str )-> str:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , a_ )
def a__( self : Tuple )-> str:
"""simple docstring"""
pass
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
UpperCAmelCase = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(a_ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(a_ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCAmelCase = DetaImageProcessor()
UpperCAmelCase = image_processing(images=a_ , annotations=a_ , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , a_ )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , a_ , atol=1E-4 ) )
# verify area
UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , a_ ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , a_ )
UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , a_ , atol=1E-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , a_ ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , a_ ) )
# verify class_labels
UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , a_ ) )
# verify orig_size
UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , a_ ) )
# verify size
UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , a_ ) )
@slow
def a__( self : str )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCAmelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase = image_processing(images=a_ , annotations=a_ , masks_path=a_ , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , a_ )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , a_ , atol=1E-4 ) )
# verify area
UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , a_ ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , a_ )
UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , a_ , atol=1E-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , a_ ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , a_ ) )
# verify class_labels
UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , a_ ) )
# verify masks
UpperCAmelCase = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , a_ )
# verify orig_size
UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , a_ ) )
# verify size
UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , a_ ) )
| 210
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_a : List[Any] = logging.get_logger(__name__)
class DonutFeatureExtractor( DonutImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 447
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
lowerCamelCase :List[str] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class Direction( enum.Enum ):
    UP = 0
    DOWN = 1
def forceWrite( content , end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()


def writeColor( content , color , end="" ):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m" , end )


def reset_cursor():
    forceWrite('''\r''' )


def move_cursor( num_lines , direction ):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" )


def clear_line():
    forceWrite(''' ''' * TERMINAL_WIDTH )
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH )
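# Illustrative usage (an addition for clarity, not part of the original file):
# ANSI color code 32 is green; this prints a green "ok" followed by a newline.
if __name__ == "__main__":
    writeColor("ok", 32, end="\n")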
| 346
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
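# Lazy-import sketch: with _import_structure wired up above, submodules load on
# first attribute access rather than at package import time, e.g.
#
#   from transformers.models.roc_bert import RoCBertConfig   # cheap, config only
#   from transformers.models.roc_bert import RoCBertModel    # first use pulls in torch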
| 346
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    # every weight type (weight, weight_g/v, the LSTM gate tensors, batch-norm
    # buffers) resolves to an attribute of the module, so one getattr covers
    # the whole dispatch that was previously an elif chain
    if weight_type is None:
        hf_pointer.data = value
    else:
        getattr(hf_pointer, weight_type).data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
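# Illustrative checks of the wildcard semantics above:
#
#   should_ignore('decoder.model.0.conv.conv', ['decoder.*'])        # True: prefix wildcard
#   should_ignore('encoder.model.1.block.1.conv', ['model.*.block']) # True: '.*.' infix match
#   should_ignore('quantizer.vq.layers.0._codebook.embed', [])       # False: nothing to ignore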
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed') and name.endswith('embed_avg'):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
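# Example invocation (script name and local paths are illustrative):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf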
| 52
|
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else 'train')
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """simple docstring"""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """simple docstring"""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'train': parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({'train': parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = 'train'
        path = {'train': parquet_path, 'test': parquet_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / 'foo.parquet')
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """simple docstring"""
    image_path = str(shared_datadir / 'test_image_rgb.jpg')
    data = {'image': [image_path]}
    features = Features({'image': Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / 'foo.parquet')
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / 'foo.parquet'))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / 'foo.parquet'), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    """simple docstring"""
    assert get_writer_batch_size(feature) == expected
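# Round-trip sketch outside of pytest (path is illustrative):
#
#   ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2]})
#   ParquetDatasetWriter(ds, '/tmp/demo.parquet').write()
#   reloaded = ParquetDatasetReader('/tmp/demo.parquet').read()
#   assert reloaded.column_names == ['col_1', 'col_2']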
| 347
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[Any] = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
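# e.g. get_yolos_config('yolos_ti') yields hidden_size=192 and 91 COCO labels,
# per the branches above.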
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace('backbone', 'vit')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "det_token" in name:
        name = name.replace('det_token', 'embeddings.detection_tokens')
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed', 'encoder.mid_position_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "class_embed" in name:
        name = name.replace('class_embed', 'class_labels_classifier')
    if "bbox_embed" in name:
        name = name.replace('bbox_embed', 'bbox_predictor')
    if "vit.norm" in name:
        name = name.replace('vit.norm', 'vit.layernorm')
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """simple docstring"""
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('Pushing to the hub...')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='hustvl')
        model.push_to_hub(model_name, organization='hustvl')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
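# Example invocation (script name and local checkpoint path are illustrative):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small-hf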
| 389
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=False, max_length=None, stride=0, pad_to_multiple_of=None, return_image_mask=None, return_codebook_pixels=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
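# Usage sketch (checkpoint name is illustrative):
#
#   processor = FlavaProcessor.from_pretrained('facebook/flava-full')
#   inputs = processor(text=['a photo of a cat'], images=image, return_tensors='pt')
#   # -> input_ids/attention_mask from the tokenizer plus pixel_values from the
#   #    image processor, merged into one BatchEncoding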
| 389
| 1
|
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__(self):
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return '->'.join(string_rep)
def make_linked_list(elements_list: list) -> Node:
    if not elements_list:
        raise Exception('The Elements List is empty')
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node: Node) -> None:
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main() -> None:
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)
if __name__ == "__main__":
main()
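# Expected output of main():
#
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14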
| 286
|
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=10_00)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 286
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    backbone_config = BitConfig(
        global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print('Predicted class:', logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""")
        model.push_to_hub(f"""ybelkada/{vit_name}""")
        processor.push_to_hub(f"""ybelkada/{vit_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
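# Example invocation (script name and dump folder are illustrative; the weights
# themselves are fetched from timm):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384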
| 712
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319
| 0
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__( self , vocab_size=2_11_28 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) -> str:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
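# Usage sketch (checkpoint name taken from the archive map above):
#
#   config = NezhaConfig.from_pretrained('sijunhe/nezha-cn-base')
#   config.max_relative_position  # 64: the span of NEZHA's functional
#                                 # relative position encoding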
| 147
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> Optional[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False)
    def test_config(self):
        """simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowercase ( self ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
_UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase = layer_type
_UpperCamelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def lowercase ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def _lowercase ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="tf" )
# forward pass
_UpperCamelCase = model(**lowerCamelCase_ )
# verify the logits
_UpperCamelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
_UpperCamelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
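# Aside (illustrative): the `image_size // 32` in the shape assertions above
# follows from ResNet's downsampling schedule -- the stem reduces resolution by
# a factor of 4 and each stage transition after the first halves it again, so
# with the default 4-stage layout:
assert 4 * 2 ** (4 - 1) == 32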
| 147
| 1
|
__lowerCAmelCase ={
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = set()
# keep track of all the paths to be checked
UpperCAmelCase = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
UpperCAmelCase = queue.pop(0 )
# get the last node from the path
UpperCAmelCase = path[-1]
if node not in explored:
UpperCAmelCase = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
UpperCAmelCase = list(_lowerCAmelCase )
new_path.append(_lowerCAmelCase )
queue.append(_lowerCAmelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_lowerCAmelCase )
# in case there's no path between the 2 nodes
return []
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCAmelCase = [start]
UpperCAmelCase = set(_lowerCAmelCase )
# Keep tab on distances from `start` node.
UpperCAmelCase = {start: 0, target: -1}
while queue:
UpperCAmelCase = queue.pop(0 )
if node == target:
UpperCAmelCase = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_lowerCAmelCase )
queue.append(_lowerCAmelCase )
UpperCAmelCase = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
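# Design note (illustrative sketch, not part of the module above): `pop(0)` on a
# Python list is O(n); `collections.deque` gives O(1) left-pops with the same
# FIFO order. A minimal distance-only BFS built that way:
from collections import deque
def bfs_distance_sketch(graph, start, goal):
    """Return the edge count of a shortest path, or -1 if unreachable."""
    queue, seen = deque([(start, 0)]), {start}
    while queue:
        node, dist = queue.popleft()
        if node == goal:
            return dist
        for neighbour in graph[node]:
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append((neighbour, dist + 1))
    return -1
# bfs_distance_sketch(demo_graph, "G", "D") -> 4, matching the result above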
| 700
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __UpperCamelCase ( *_lowerCAmelCase ):
"""simple docstring"""
    # take the lock on the script file itself so that prints from concurrent
    # processes cannot interleave; flock needs an open file handle, not a string
    with open(__file__ , "r" ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_lowerCAmelCase )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
__lowerCAmelCase =int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__lowerCAmelCase =torch.device("cuda", local_rank)
__lowerCAmelCase =socket.gethostname()
__lowerCAmelCase =f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__lowerCAmelCase =dist.get_rank()
__lowerCAmelCase =dist.get_world_size()
printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(f"{gpu} is broken")
raise
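# Aside -- a minimal single-node smoke test of the same all-reduce/barrier idea,
# runnable without torchrun. The port, world size, and "gloo" backend below are
# illustrative assumptions (gloo also works on CPU-only machines):
#
# import torch.multiprocessing as mp
#
# def _worker(rank, world_size):
#     os.environ["MASTER_ADDR"] = "127.0.0.1"
#     os.environ["MASTER_PORT"] = "29500"
#     dist.init_process_group("gloo", rank=rank, world_size=world_size)
#     t = torch.ones(1)
#     dist.all_reduce(t, op=dist.ReduceOp.SUM)
#     assert t.item() == world_size
#     dist.barrier()
#     dist.destroy_process_group()
#
# mp.spawn(_worker, args=(2,), nprocs=2)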
| 405
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
def __lowerCAmelCase ( __snake_case , __snake_case=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ""
else:
__lowerCAmelCase = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
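# Aside (illustrative, not part of the conversion): timm stores attention as one
# fused qkv projection of shape (3 * hidden_size, hidden_size); the slicing
# above is simply
#   q = in_proj_weight[: hidden_size]
#   k = in_proj_weight[hidden_size : 2 * hidden_size]
#   v = in_proj_weight[-hidden_size :]
# copied into the separate query/key/value projections of the HuggingFace model.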
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
__lowerCAmelCase = dct.pop(UpperCamelCase__ )
__lowerCAmelCase = val
def __lowerCAmelCase ( ):
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( __snake_case , __snake_case ):
__lowerCAmelCase = DeiTConfig()
# all deit models have fine-tuned heads
__lowerCAmelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__lowerCAmelCase = 1000
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = int(deit_name[-6:-4] )
__lowerCAmelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
__lowerCAmelCase = 192
__lowerCAmelCase = 768
__lowerCAmelCase = 12
__lowerCAmelCase = 3
elif deit_name[9:].startswith("small" ):
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
# load original model from timm
__lowerCAmelCase = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = timm_model.state_dict()
__lowerCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
__lowerCAmelCase = DeiTForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
__lowerCAmelCase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__lowerCAmelCase = DeiTImageProcessor(size=UpperCamelCase__ , crop_size=config.image_size )
__lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowerCAmelCase = encoding["pixel_values"]
__lowerCAmelCase = model(UpperCamelCase__ )
__lowerCAmelCase = timm_model(UpperCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1E-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase : Dict = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 367
|
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
try:
SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
while i * i <= n:
while n % i == 0:
SCREAMING_SNAKE_CASE__ = i
n //= i
i += 1
if n > 1:
SCREAMING_SNAKE_CASE__ = n
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
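# Illustrative sanity checks: trial division only needs candidates up to
# sqrt(n) -- hence `i * i <= n` -- and any cofactor left above 1 is itself prime.
# assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
# assert solution(17) == 17  # a prime is its own largest prime factor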
| 6
| 0
|
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = []
for part_id in partition_order:
_lowerCAmelCase = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(__UpperCamelCase ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_lowerCAmelCase = spark.range(1_0_0 ).repartition(1 )
_lowerCAmelCase = Spark(__UpperCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 5_0
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_lowerCAmelCase = spark.range(1_0 ).repartition(2 )
_lowerCAmelCase = [1, 0]
_lowerCAmelCase = _generate_iterable_examples(__UpperCamelCase, __UpperCamelCase ) # Reverse the partitions.
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase, __UpperCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_lowerCAmelCase = spark.range(1_0 ).repartition(1 )
_lowerCAmelCase = SparkExamplesIterable(__UpperCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_lowerCAmelCase = spark.range(3_0 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
        _lowerCAmelCase = lambda x: x.reverse()
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase, [2, 1, 0] )
_lowerCAmelCase = SparkExamplesIterable(__UpperCamelCase ).shuffle_data_sources(__UpperCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_lowerCAmelCase = spark.range(2_0 ).repartition(4 )
# Partitions 0 and 2
_lowerCAmelCase = SparkExamplesIterable(__UpperCamelCase ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase, [0, 2] )
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_lowerCAmelCase = SparkExamplesIterable(__UpperCamelCase ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCamelCase, [1, 3] )
for i, (row_id, row_dict) in enumerate(__UpperCamelCase ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_lowerCAmelCase = spark.range(1_0_0 ).repartition(1 )
_lowerCAmelCase = Spark(__UpperCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
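# Aside (illustrative): the worker/shard split exercised above is round-robin --
# worker `w` out of `W` gets partitions w, w + W, w + 2W, ...
def round_robin_shards_sketch(num_partitions, worker_id, num_workers):
    """Partition indices a given worker reads under round-robin assignment."""
    return list(range(worker_id, num_partitions, num_workers))
# round_robin_shards_sketch(4, 0, 2) -> [0, 2] and
# round_robin_shards_sketch(4, 1, 2) -> [1, 3], matching the two shards above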
| 713
|
"""simple docstring"""
import operator as op
a__ : Optional[int] = """scaler.pt"""
a__ : Dict = """pytorch_model"""
a__ : List[Any] = """random_states"""
a__ : Union[str, Any] = """optimizer"""
a__ : Tuple = """scheduler"""
a__ : Any = """pytorch_model.bin"""
a__ : int = """pytorch_model.bin.index.json"""
a__ : Union[str, Any] = """model.safetensors"""
a__ : Optional[int] = """model.safetensors.index.json"""
a__ : str = """1.10.2"""
a__ : int = """py38"""
a__ : Any = """4.17.0"""
a__ : List[str] = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
a__ : str = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
a__ : Optional[int] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
a__ : int = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
a__ : int = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
a__ : int = """2.0.1"""
a__ : Optional[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
a__ : int = ["""default""", """reduce-overhead""", """max-autotune"""]
a__ : Optional[Any] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a__ : Any = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
a__ : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
a__ : Dict = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
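# Aside (illustrative sketch): the operator table above ({">": op.gt, ...}) is
# the usual basis for version gating; `packaging` below is an assumed dependency.
from packaging import version
def compare_versions_sketch(current, operation, reference):
    """E.g. compare_versions_sketch("2.0.1", ">=", "1.10.2") -> True."""
    ops = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
    return ops[operation](version.parse(current), version.parse(reference))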
| 309
| 0
|
"""simple docstring"""
def _snake_case ( __snake_case : int , __snake_case : Dict , __snake_case : int , __snake_case : int ):
"""simple docstring"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_lowerCamelCase : Dict = mf_knapsack(i - 1 , __snake_case , __snake_case , __snake_case )
else:
_lowerCamelCase : Union[str, Any] = max(
mf_knapsack(i - 1 , __snake_case , __snake_case , __snake_case ) , mf_knapsack(i - 1 , __snake_case , __snake_case , j - wt[i - 1] ) + val[i - 1] , )
_lowerCamelCase : List[Any] = val
return f[i][j]
def _snake_case ( __snake_case : Dict , __snake_case : Tuple , __snake_case : str , __snake_case : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
_lowerCamelCase : List[str] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
_lowerCamelCase : Union[str, Any] = dp[i - 1][w_]
    return dp[n][w], dp
def _snake_case ( __snake_case : int , __snake_case : list , __snake_case : list ):
"""simple docstring"""
if not (isinstance(__snake_case , (list, tuple) ) and isinstance(__snake_case , (list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
_lowerCamelCase : int = len(__snake_case )
if num_items != len(__snake_case ):
_lowerCamelCase : List[Any] = (
"""The number of weights must be the same as the number of values.\n"""
F'But got {num_items} weights and {len(__snake_case )} values'
)
raise ValueError(__snake_case )
for i in range(__snake_case ):
if not isinstance(wt[i] , __snake_case ):
_lowerCamelCase : List[Any] = (
"""All weights must be integers but got weight of """
F'type {type(wt[i] )} at index {i}'
)
raise TypeError(__snake_case )
_lowerCamelCase , _lowerCamelCase : int = knapsack(__snake_case , __snake_case , __snake_case , __snake_case )
_lowerCamelCase : set = set()
_construct_solution(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
return optimal_val, example_optional_set
def _snake_case ( __snake_case : list , __snake_case : list , __snake_case : int , __snake_case : int , __snake_case : set ):
"""simple docstring"""
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(__snake_case , __snake_case , i - 1 , __snake_case , __snake_case )
else:
optimal_set.add(__snake_case )
_construct_solution(__snake_case , __snake_case , i - 1 , j - wt[i - 1] , __snake_case )
if __name__ == "__main__":
UpperCAmelCase = [3, 2, 4, 4]
UpperCAmelCase = [4, 3, 2, 3]
UpperCAmelCase = 4
UpperCAmelCase = 6
UpperCAmelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
UpperCAmelCase , UpperCAmelCase = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
UpperCAmelCase , UpperCAmelCase = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
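# Aside -- a space-optimized 0/1 knapsack sketch (illustrative, separate from the
# module above): one row suffices when capacity is iterated downwards, because
# each cell then still reads "previous item" values.
def knapsack_1d_sketch(capacity, weights, values):
    dp = [0] * (capacity + 1)
    for weight, value in zip(weights, values):
        for c in range(capacity, weight - 1, -1):
            dp[c] = max(dp[c], dp[c - weight] + value)
    return dp[capacity]
# knapsack_1d_sketch(6, [4, 3, 2, 3], [3, 2, 4, 4]) -> 8, matching the demo above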
| 88
|
from ...processing_utils import ProcessorMixin
class a_ ( _a ):
a : Optional[int] = '''SpeechT5FeatureExtractor'''
a : List[Any] = '''SpeechT5Tokenizer'''
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self , *__UpperCamelCase , **__UpperCamelCase ):
_lowercase = kwargs.pop("""audio""" , __UpperCamelCase )
_lowercase = kwargs.pop("""text""" , __UpperCamelCase )
_lowercase = kwargs.pop("""text_target""" , __UpperCamelCase )
_lowercase = kwargs.pop("""audio_target""" , __UpperCamelCase )
_lowercase = kwargs.pop("""sampling_rate""" , __UpperCamelCase )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
_lowercase = self.feature_extractor(__UpperCamelCase , *__UpperCamelCase , sampling_rate=__UpperCamelCase , **__UpperCamelCase )
elif text is not None:
_lowercase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
else:
_lowercase = None
if audio_target is not None:
_lowercase = self.feature_extractor(audio_target=__UpperCamelCase , *__UpperCamelCase , sampling_rate=__UpperCamelCase , **__UpperCamelCase )
_lowercase = targets["""input_values"""]
elif text_target is not None:
_lowercase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
_lowercase = targets["""input_ids"""]
else:
_lowercase = None
if inputs is None:
return targets
if targets is not None:
_lowercase = labels
_lowercase = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
_lowercase = decoder_attention_mask
return inputs
def UpperCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
_lowercase = kwargs.pop("""input_values""" , __UpperCamelCase )
_lowercase = kwargs.pop("""input_ids""" , __UpperCamelCase )
_lowercase = kwargs.pop("""labels""" , __UpperCamelCase )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
_lowercase = self.feature_extractor.pad(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif input_ids is not None:
_lowercase = self.tokenizer.pad(__UpperCamelCase , **__UpperCamelCase )
else:
_lowercase = None
if labels is not None:
if "input_ids" in labels or (isinstance(__UpperCamelCase , __UpperCamelCase ) and "input_ids" in labels[0]):
_lowercase = self.tokenizer.pad(__UpperCamelCase , **__UpperCamelCase )
_lowercase = targets["""input_ids"""]
else:
_lowercase = self.feature_extractor.feature_size
_lowercase = self.feature_extractor.num_mel_bins
_lowercase = self.feature_extractor.pad(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
_lowercase = feature_size_hack
_lowercase = targets["""input_values"""]
else:
_lowercase = None
if inputs is None:
return targets
if targets is not None:
_lowercase = labels
_lowercase = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
_lowercase = decoder_attention_mask
return inputs
def UpperCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def UpperCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
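# Hedged usage sketch (the checkpoint name below is an assumption, not taken
# from this file): the processor routes `text` to the tokenizer and `audio` to
# the feature extractor, and `*_target` inputs end up under `labels`.
#
# from transformers import SpeechT5Processor
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="Hello world", return_tensors="pt")  # tokenized ids
# # inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")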
| 287
| 0
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_A: Dict = False
class UpperCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__UpperCAmelCase = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe.dual_guided(
prompt='first prompt' , image=lowerCamelCase__ , text_to_image_strength=0.7_5 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
__UpperCAmelCase = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase = generator.manual_seed(0 )
__UpperCAmelCase = pipe.dual_guided(
prompt='first prompt' , image=lowerCamelCase__ , text_to_image_strength=0.7_5 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCamelCase ( self ):
__UpperCAmelCase = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCAmelCase = '''cyberpunk 2077'''
__UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.7_5 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__UpperCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__UpperCAmelCase = '''A painting of a squirrel eating a burger '''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
__UpperCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__UpperCAmelCase = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type='numpy' ).images
__UpperCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
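# Aside (illustrative): the save/reload check above hinges on seeding -- because
# the generator is reset to seed 0 before each run, diffusion sampling is
# deterministic, so `np.abs(image - new_image).sum() < 1e-5` measures pipeline
# equivalence rather than sampling noise.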
| 718
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase :
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=64 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = embedding_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
def __lowerCamelCase ( self ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = MegatronBertModel(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A )
__UpperCAmelCase = model(__A , token_type_ids=__A )
__UpperCAmelCase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = MegatronBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = MegatronBertForCausalLM(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = MegatronBertForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = MegatronBertForPreTraining(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = MegatronBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForSequenceClassification(__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = MegatronBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.prepare_config_and_inputs()
        (
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
        ) = config_and_inputs
__UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
_A : str = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Dict = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : List[str] = True
# test_resize_embeddings = False
_A : Tuple = False
def __lowerCamelCase ( self , __A , __A , __A=False ):
__UpperCAmelCase = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
__UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
__UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def __lowerCamelCase ( self ):
__UpperCAmelCase = MegatronBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__A )
def _lowerCAmelCase ( _lowerCAmelCase )-> Dict:
return torch.tensor(
_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase , )
_A: Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip('Model is not available.' )
def __lowerCamelCase ( self ):
__UpperCAmelCase = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
__UpperCAmelCase = os.path.join(os.environ['MYDIR'] , __A )
__UpperCAmelCase = MegatronBertModel.from_pretrained(__A )
model.to(__A )
model.half()
__UpperCAmelCase = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
__UpperCAmelCase = model(__A )[0]
__UpperCAmelCase = torch.Size((1, 9, 1_024) )
self.assertEqual(output.shape , __A )
__UpperCAmelCase = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
__UpperCAmelCase = output[0, ii, jj]
__UpperCAmelCase = expected[3 * ii + jj]
__UpperCAmelCase = 'ii={} jj={} a={} b={}'.format(__A , __A , __A , __A )
self.assertTrue(math.isclose(__A , __A , rel_tol=__A , abs_tol=__A ) , msg=__A )
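# Aside (illustrative): the element-by-element `math.isclose` loop above can be
# collapsed into one tensor assertion; the slice and tolerances below mirror the
# loop and are assumptions, not part of the original test:
#
#   torch.testing.assert_close(
#       output[0, :3, :3].flatten(),
#       torch.tensor(expected, dtype=output.dtype, device=output.device),
#       rtol=1e-4,
#       atol=1e-4,
#   )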
| 617
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Optional[Any] = 16
A_ : List[Any] = 32
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = 1_6 , UpperCAmelCase__ = "bert-base-cased" ) -> Union[str, Any]:
UpperCamelCase_: str = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
UpperCamelCase_: int = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase_: Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase_: Tuple = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase_: Any = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase__ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
UpperCamelCase_: Dict = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
UpperCamelCase_: Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
# Initialize accelerator
UpperCamelCase_: List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase_: List[str] = config['lr']
UpperCamelCase_: Any = int(config['num_epochs'] )
UpperCamelCase_: str = int(config['seed'] )
UpperCamelCase_: Union[str, Any] = int(config['batch_size'] )
UpperCamelCase_: List[Any] = args.model_name_or_path
set_seed(UpperCAmelCase__ )
UpperCamelCase_ ,UpperCamelCase_: int = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_: Tuple = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
# Instantiate optimizer
UpperCamelCase_: Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase_: Optional[int] = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase_: Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
UpperCamelCase_: Dict = 1
UpperCamelCase_: int = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase_: Dict = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
else:
UpperCamelCase_: int = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Tuple = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase_: List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase_: List[str] = 0
# Now we train the model
UpperCamelCase_: Dict = evaluate.load('glue' , 'mrpc' )
UpperCamelCase_: Optional[int] = 0
UpperCamelCase_: Optional[Any] = {}
for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
UpperCamelCase_: List[Any] = model(**UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = outputs.loss
UpperCamelCase_: Any = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCamelCase_: Optional[int] = 0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase_: Optional[Any] = model(**UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase__ ) - 1:
UpperCamelCase_: str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase_: List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
UpperCamelCase_: Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
UpperCamelCase_: List[Any] = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case () -> int:
UpperCamelCase_: str = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase__ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase__ , default=3 , help='Number of train epochs.' , )
UpperCamelCase_: int = parser.parse_args()
UpperCamelCase_: List[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
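# Aside -- the gradient-accumulation pattern used in the training loop above, in
# isolation (illustrative sketch; `k` stands for gradient_accumulation_steps):
#
# for step, batch in enumerate(train_dataloader):
#     loss = model(**batch).loss / k  # scale so accumulated gradients average out
#     accelerator.backward(loss)
#     if step % k == 0:  # step the optimizer once every k micro-batches
#         optimizer.step()
#         lr_scheduler.step()
#         optimizer.zero_grad()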
| 57
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
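
# Usage sketch outside the test harness (illustrative; constructor kwargs are assumptions):
#   from transformers import DonutImageProcessor
#   processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   pixel_values = processor(images, return_tensors="pt").pixel_values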
| 52
| 0
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """
    Dijkstra's algorithm on a binary grid, where 1 marks a walkable cell and 0 a
    blocked one. Returns the shortest distance from source to destination and the
    path as a list of (row, col) coordinates.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
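
# Example (a sketch, not part of the original module): on a fully walkable 3x3
# grid the Manhattan route from (0, 0) to (2, 2) costs 4 unit steps.
#   >>> grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
#   >>> dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)[0]
#   4.0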
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
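
# Backbone usage sketch (illustrative; config values and input shapes are assumptions):
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
#   backbone = MaskFormerSwinBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one feature map per requested stage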
| 634
| 1
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Find scores of each token being start and end token for an entity.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # BERT encodes the query and the concatenated support sentences
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
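
# Inference sketch (illustrative): `W_query` / `W_supports` are tokenizer outputs
# (dicts with input_ids/attention_mask); `W_supports` additionally carries the
# "sizes", "start_token_id" and "end_token_id" entries consumed above.
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)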
| 101
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666
| 0
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the constructed distributions."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A valid numeric value for the distribution's support, used when padding data series."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping inputs to the distribution's parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Converts arguments to the right shape and domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Maps inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method: we cannot scale with the affine
    # transformation since the negative binomial should return integers,
    # so we scale the parameters instead.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
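
# Usage sketch (illustrative, not part of the original module): project a batch
# of hidden features to Student-T parameters, then build a distribution.
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=32)
#   df, loc, scale = proj(torch.randn(8, 32))   # domain_map constrains df and scale
#   distr = output.distribution((df, loc, scale))
#   sample = distr.sample()                     # shape: (8,)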
| 702
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
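
# Usage sketch (illustrative): FileLock serializes access to a shared resource
# across processes; acquire() blocks until the lock file is free or a timeout hits.
#   with FileLock("/tmp/shared_resource.lock"):
#       ...  # only one process at a time executes this block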
| 689
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree, i.e. its number of levels."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal of the whole tree."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns the values at the given level, from left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns the values at the given level, from right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 15
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
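
# Invocation sketch (illustrative path): report inconsistencies, or rewrite the ToC in place:
#   python utils/check_doc_toc.py
#   python utils/check_doc_toc.py --fix_and_overwrite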
| 9
| 0
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 701
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
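
# Usage sketch (illustrative): warn about a renamed keyword argument and recover
# its value from the caller's kwargs in one call.
#   def run(**kwargs):
#       scale = deprecate("scale", "0.20.0", "Use `guidance_scale` instead.", take_from=kwargs)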
| 64
| 0
|
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element by scanning all
    subsequent elements: O(n^2).
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow() but uses enumerate() for the outer loop
    and iterates over a slice of arr in the inner loop.
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for all elements in linear time by
    keeping candidate maxima on a stack while scanning from the right.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 136
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
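The snippet above calls set_recursively without showing its definition, which sits before the excerpt. A minimal sketch of the usual pattern in these fairseq-to-transformers conversion scripts (the signature and the is_finetuned handling are assumptions, not taken from this file):

def set_recursively(hf_model, key, value, full_name, weight_type, is_finetuned):
    # is_finetuned adjusts the key prefix in the real script; omitted in this sketch
    # walk the dotted attribute path down to the target submodule
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    # check that the checkpoint tensor matches the slot it is copied into
    hf_shape = getattr(hf_pointer, weight_type).shape if weight_type is not None else hf_pointer.shape
    assert hf_shape == value.shape, f"Shape of {full_name} is {value.shape}, but {hf_shape} was expected"
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value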
| 136
| 1
|
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    # yield Fibonacci numbers indefinitely: 1, 1, 2, 3, 5, ...
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    # return the index of the first Fibonacci number with n digits
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
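A quick check of the function above: the first Fibonacci number with three digits is F(12) = 144, so solution(3) should return 12.

assert solution(3) == 12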
| 713
|
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
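A quick illustration of the property above, using the class as reconstructed here:

config = Data2VecAudioConfig()
# the feature encoder downsamples raw audio by prod(conv_stride) = 5 * 2**6 = 320,
# i.e. one encoder frame per 20 ms of 16 kHz audio
print(config.inputs_to_logits_ratio)  # 320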
| 157
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 595
|
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
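A small usage sketch of prisms_algorithm on a triangle graph (the graph and expected output below are illustrative):

# triangle: 0-1 weight 1, 1-2 weight 2, 0-2 weight 4
example_graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
    example_graph[u].append([v, w])
    example_graph[v].append([u, w])
print(prisms_algorithm(example_graph))  # minimum spanning tree edges: [(0, 1), (1, 2)]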
| 415
| 0
|
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 706
|
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = checkpoint["args"]
    state_dict = checkpoint["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
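make_linear_from_emb ties the output projection to the input embedding by reusing the embedding's storage as the linear layer's weight. A minimal sketch of this behavior with a toy embedding (sizes are arbitrary):

import torch
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = make_linear_from_emb(emb)
# the linear layer now wraps the embedding's tensor, so the weights stay tied
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()
assert lm_head.weight.shape == (10, 4)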
| 587
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 494
|
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )

_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
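A hand-computed instance of the CER formula from the description above, for reference "abc" against prediction "axc" (one substitution, no deletions or insertions, three reference characters):

S, D, I, N = 1, 0, 0, 3  # "abc" -> "axc"
print((S + D + I) / N)   # 0.333...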
| 494
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
A = 'unispeech-sat'
def __init__( self :List[Any] , lowerCamelCase_ :Any=3_2 , lowerCamelCase_ :int=7_6_8 , lowerCamelCase_ :Any=1_2 , lowerCamelCase_ :str=1_2 , lowerCamelCase_ :Optional[Any]=3_0_7_2 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :List[Any]=0.0 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.02 , lowerCamelCase_ :Union[str, Any]=1e-5 , lowerCamelCase_ :Dict="group" , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :List[str]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase_ :List[str]=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase_ :str=(1_0, 3, 3, 3, 3, 2, 2) , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :Union[str, Any]=1_2_8 , lowerCamelCase_ :Any=1_6 , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :Any=True , lowerCamelCase_ :Optional[Any]=0.05 , lowerCamelCase_ :List[Any]=1_0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :int=1_0 , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Optional[int]=3_2_0 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :str=1_0_0 , lowerCamelCase_ :Dict=2_5_6 , lowerCamelCase_ :List[Any]=2_5_6 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :List[str]="mean" , lowerCamelCase_ :Dict=False , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=2_5_6 , lowerCamelCase_ :Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowerCamelCase_ :List[Any]=(5, 3, 3, 1, 1) , lowerCamelCase_ :Dict=(1, 2, 3, 1, 1) , lowerCamelCase_ :Any=5_1_2 , lowerCamelCase_ :Tuple=0 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :List[str]=5_0_4 , **lowerCamelCase_ :Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = feat_extract_norm
UpperCamelCase__ = feat_extract_activation
UpperCamelCase__ = list(lowerCamelCase_ )
UpperCamelCase__ = list(lowerCamelCase_ )
UpperCamelCase__ = list(lowerCamelCase_ )
UpperCamelCase__ = conv_bias
UpperCamelCase__ = num_conv_pos_embeddings
UpperCamelCase__ = num_conv_pos_embedding_groups
UpperCamelCase__ = len(self.conv_dim )
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = feat_proj_dropout
UpperCamelCase__ = final_dropout
UpperCamelCase__ = layerdrop
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = vocab_size
UpperCamelCase__ = num_clusters
UpperCamelCase__ = do_stable_layer_norm
UpperCamelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ = apply_spec_augment
UpperCamelCase__ = mask_time_prob
UpperCamelCase__ = mask_time_length
UpperCamelCase__ = mask_time_min_masks
UpperCamelCase__ = mask_feature_prob
UpperCamelCase__ = mask_feature_length
UpperCamelCase__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase__ = num_codevectors_per_group
UpperCamelCase__ = num_codevector_groups
UpperCamelCase__ = contrastive_logits_temperature
UpperCamelCase__ = feat_quantizer_dropout
UpperCamelCase__ = num_negatives
UpperCamelCase__ = codevector_dim
UpperCamelCase__ = proj_codevector_dim
UpperCamelCase__ = diversity_loss_weight
# ctc loss
UpperCamelCase__ = ctc_loss_reduction
UpperCamelCase__ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ = list(lowerCamelCase_ )
UpperCamelCase__ = list(lowerCamelCase_ )
UpperCamelCase__ = list(lowerCamelCase_ )
UpperCamelCase__ = xvector_output_dim
@property
def lowerCamelCase__ ( self :List[str] ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 704
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self :Dict ) -> int:
"""simple docstring"""
UpperCamelCase__ = "ZinengTang/tvlt-base"
UpperCamelCase__ = tempfile.mkdtemp()
def lowerCamelCase__ ( self :Tuple , **lowerCamelCase_ :List[str] ) -> List[str]:
"""simple docstring"""
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase__ ( self :str , **lowerCamelCase_ :Union[str, Any] ) -> Any:
"""simple docstring"""
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase__ ( self :int ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self :List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase_ )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def lowerCamelCase__ ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
UpperCamelCase__ = np.ones([1_2_0_0_0] )
UpperCamelCase__ = feature_extractor(lowerCamelCase_ , return_tensors="np" )
UpperCamelCase__ = processor(audio=lowerCamelCase_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase__ ( self :Dict ) -> str:
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
UpperCamelCase__ = np.ones([3, 2_2_4, 2_2_4] )
UpperCamelCase__ = image_processor(lowerCamelCase_ , return_tensors="np" )
UpperCamelCase__ = processor(images=lowerCamelCase_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase__ ( self :List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
UpperCamelCase__ = np.ones([1_2_0_0_0] )
UpperCamelCase__ = np.ones([3, 2_2_4, 2_2_4] )
UpperCamelCase__ = processor(audio=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def lowerCamelCase__ ( self :Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 304
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 592
|
'''simple docstring'''
def snake_case_ ( _lowerCAmelCase : int ) -> list:
UpperCAmelCase : Union[str, Any] = int(_lowerCAmelCase )
if n_element < 1:
UpperCAmelCase : int = ValueError('''a should be a positive number''' )
raise my_error
UpperCAmelCase : str = [1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = (0, 0, 0)
UpperCAmelCase : Any = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
UpperCamelCase__: List[str] = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
UpperCamelCase__: str = hamming(int(n))
print("-----------------------------------------------------")
print(F"The list with nth numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
| 127
| 0
|
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 705
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 583
| 0
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_framework_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 211
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
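The whitespace check above means any string containing a space is treated as the prompt itself rather than a repo id, so the call below returns its input unchanged and needs no network access (the agent name is an arbitrary illustrative value):

assert download_prompt("Summarize <<task>>", agent_name="demo-agent") == "Summarize <<task>>"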
| 403
| 0
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A_ : Optional[int] =logging.get_logger(__name__)
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : str )-> Dict:
if "xprophetnet" in prophetnet_checkpoint_path:
_lowerCamelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case )
_lowerCamelCase , _lowerCamelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case , output_loading_info=snake_case )
else:
_lowerCamelCase = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case )
_lowerCamelCase , _lowerCamelCase = ProphetNetForConditionalGeneration.from_pretrained(
snake_case , output_loading_info=snake_case )
_lowerCamelCase = ['key_proj', 'value_proj', 'query_proj']
_lowerCamelCase = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_lowerCamelCase = key.split('.' )
if attributes[0] == "lm_head":
_lowerCamelCase = prophet
_lowerCamelCase = prophet_old
else:
_lowerCamelCase = prophet.prophetnet
_lowerCamelCase = prophet_old.model
_lowerCamelCase = False
for attribute in attributes:
if attribute in mapping:
_lowerCamelCase = mapping[attribute]
if not hasattr(snake_case , snake_case ) and len(snake_case ) > 0:
_lowerCamelCase = attribute
elif hasattr(snake_case , snake_case ):
_lowerCamelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_lowerCamelCase = old_model.weight
logger.info(f'{attribute} is initialized.' )
_lowerCamelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_lowerCamelCase = old_model.bias
logger.info(f'{attribute} is initialized' )
_lowerCamelCase = True
break
elif attribute in special_keys and hasattr(snake_case , 'in_proj_weight' ):
_lowerCamelCase = old_model.in_proj_weight.shape[0] // 3
_lowerCamelCase = getattr(snake_case , snake_case )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_lowerCamelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_lowerCamelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_lowerCamelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_lowerCamelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_lowerCamelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_lowerCamelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_lowerCamelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_lowerCamelCase = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_lowerCamelCase = True
break
if attribute.isdigit():
_lowerCamelCase = model[int(snake_case )]
_lowerCamelCase = old_model[int(snake_case )]
else:
_lowerCamelCase = getattr(snake_case , snake_case )
if old_attribute == "":
_lowerCamelCase = old_model
else:
if not hasattr(snake_case , snake_case ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
_lowerCamelCase = getattr(snake_case , snake_case )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(snake_case )
if __name__ == "__main__":
A_ : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A_ : str =parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 701
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = IFInpaintingPipeline
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self ):
return self._get_dummy_components()
def snake_case_ ( self , a__ , a__=0 ):
if str(a__ ).startswith('mps' ):
_lowerCamelCase = torch.manual_seed(a__ )
else:
_lowerCamelCase = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def snake_case_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case_ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def snake_case_ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case_ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case_ ( self ):
self._test_save_load_local()
def snake_case_ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 222
| 0
|