| code (string, lengths 82-54.1k) | code_codestyle (int64, 0-699) | style_context (string, lengths 111-35.6k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
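# Maps fairseq parameter-name prefixes to their HF UniSpeech counterparts; "*" is later replaced by the encoder layer index.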
__UpperCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
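# Keys that sit at the top level of the HF model rather than under the "unispeech." prefix.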
__UpperCAmelCase = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
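# Resolve the dotted `key` on the HF model and copy `value` into the matching weight/bias after a shape check.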
def _snake_case ( A , A , A , A , A , A ) -> Optional[Any]:
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCAmelCase__ = '''lm_head'''
lowerCAmelCase__ = getattr(A , A )
if weight_type is not None:
lowerCAmelCase__ = getattr(A , A ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( A , A , A ) -> Any:
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , )
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(A )[0].split('''.''' )[-2]
lowerCAmelCase__ = mapped_key.replace('''*''' , A )
if "weight_g" in name:
lowerCAmelCase__ = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase__ = '''weight_v'''
elif "bias" in name:
lowerCAmelCase__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = '''weight'''
else:
lowerCAmelCase__ = None
set_recursively(A , A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( A , A , A , A , A ) -> List[str]:
lowerCAmelCase__ = full_name.split('''conv_layers.''' )[-1]
lowerCAmelCase__ = name.split('''.''' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A )
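# Conversion entry point: build the UniSpeech config/model, create a processor for fine-tuned checkpoints, load the fairseq checkpoint, and port its weights.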
@torch.no_grad()
def _snake_case ( A , A , A=None , A=None , A=True ) -> Union[str, Any]:
if config_path is not None:
lowerCAmelCase__ = UniSpeechConfig.from_pretrained(A )
else:
lowerCAmelCase__ = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCAmelCase__ = Dictionary.load_from_json(A )
# important: change bos & pad token ids since the CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ = target_dict.pad_index
lowerCAmelCase__ = target_dict.bos_index
lowerCAmelCase__ = target_dict.eos_index
lowerCAmelCase__ = len(target_dict.symbols )
lowerCAmelCase__ = os.path.join(A , '''vocab.json''' )
if not os.path.isdir(A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A ) )
return
os.makedirs(A , exist_ok=A )
lowerCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ = 42
lowerCAmelCase__ = 43
with open(A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(A , A )
lowerCAmelCase__ = WavaVecaPhonemeCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A , )
lowerCAmelCase__ = config.feat_extract_norm == '''layer'''
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
lowerCAmelCase__ = UniSpeechForCTC(A )
else:
lowerCAmelCase__ = UniSpeechForPreTraining(A )
if is_finetuned:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCAmelCase__ = model[0].eval()
recursively_load_weights(A , A , A )
hf_unispeech.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCAmelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 90 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCAmelCase = logging.getLogger(__name__)
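# Quantize the model with bitsandbytes as specified by `bnb_quantization_config`; handles both an already-loaded model and a meta model whose weights are loaded from `weights_location`.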
def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]:
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCAmelCase__ = []
# custom device map
if isinstance(A , A ) and len(device_map.keys() ) > 1:
lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase__ = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
lowerCAmelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
lowerCAmelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase__ = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
''' We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase__ = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
lowerCAmelCase__ = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase__ = True
lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
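# Build or validate the device map: default to the current GPU, infer one for named strategies using per-module dtypes, and check that quantized modules are not dispatched to CPU/disk.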
def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]:
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase__ = {}
lowerCAmelCase__ = special_dtypes
lowerCAmelCase__ = no_split_module_classes
lowerCAmelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase__ = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
lowerCAmelCase__ = max_memory
lowerCAmelCase__ = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
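# Public wrapper: swap supported nn.Linear modules for bitsandbytes layers and warn when no replacement happened.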
def _snake_case ( A , A , A=None , A=None ) -> Any:
if modules_to_not_convert is None:
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' This can happen for some architectures, such as gpt2, which use Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
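# Recursive worker: walk the module tree, replace eligible nn.Linear modules with the matching bitsandbytes quantized linear, and recurse into children.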
def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]:
lowerCAmelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase__ = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase__ = '''.'''.join(A )
lowerCAmelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCAmelCase__ = module.weight.data
if module.bias is not None:
lowerCAmelCase__ = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
lowerCAmelCase__ = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
lowerCAmelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
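# Heuristic for modules to keep in full precision: tied parameters plus the output module of models with an attached head.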
def _snake_case ( A ) -> Tuple:
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
lowerCAmelCase__ = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase__ = sum(A , [] )
lowerCAmelCase__ = len(A ) > 0
# Check if it is a base model
lowerCAmelCase__ = False
if hasattr(A , '''base_model_prefix''' ):
lowerCAmelCase__ = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase__ = list(model.named_children() )
lowerCAmelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase__ = set(A ) - set(A )
lowerCAmelCase__ = list(set(A ) ) + list(A )
# remove ".weight" from the keys
lowerCAmelCase__ = ['''.weight''', '''.bias''']
lowerCAmelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase__ = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
def _snake_case ( A ) -> Optional[int]:
for m in model.modules():
if isinstance(A , bnb.nn.Linearabit ):
return True
return False
def _snake_case ( A ) -> Union[str, Any]:
return next(parameter.parameters() ).device
def _snake_case ( A , A , A , A , A , A , A ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
lowerCAmelCase__ = param_name
lowerCAmelCase__ = model
if "." in tensor_name:
lowerCAmelCase__ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCAmelCase__ = getattr(A , A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
lowerCAmelCase__ = new_module
lowerCAmelCase__ = splits[-1]
# offload weights
lowerCAmelCase__ = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) ) | 90 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : List[str] = ["input_features", "is_longer"]
def __init__( self , lowerCamelCase_=64 , lowerCamelCase_=4_80_00 , lowerCamelCase_=4_80 , lowerCamelCase_=10 , lowerCamelCase_=10_24 , lowerCamelCase_=0.0 , lowerCamelCase_=False , lowerCamelCase_ = 0 , lowerCamelCase_ = 1_40_00 , lowerCamelCase_ = None , lowerCamelCase_ = "fusion" , lowerCamelCase_ = "repeatpad" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(
feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = top_db
lowerCAmelCase__ = truncation
lowerCAmelCase__ = padding
lowerCAmelCase__ = fft_window_size
lowerCAmelCase__ = (fft_window_size >> 1) + 1
lowerCAmelCase__ = hop_length
lowerCAmelCase__ = max_length_s
lowerCAmelCase__ = max_length_s * sampling_rate
lowerCAmelCase__ = sampling_rate
lowerCAmelCase__ = frequency_min
lowerCAmelCase__ = frequency_max
lowerCAmelCase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm=lowerCamelCase_ , mel_scale='''htk''' , )
lowerCAmelCase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict[str, Any]:
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> np.ndarray:
lowerCAmelCase__ = spectrogram(
lowerCamelCase_ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase_ , log_mel='''dB''' , )
return log_mel_spectrogram.T
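# "Fusion" helper: sample one chunk from the front/middle/back thirds of the mel spectrogram, shrink the full mel to chunk size, and stack all four along a new axis.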
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
lowerCAmelCase__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase__ = [0]
# randomly choose index for each part
lowerCAmelCase__ = np.random.choice(ranges[0] )
lowerCAmelCase__ = np.random.choice(ranges[1] )
lowerCAmelCase__ = np.random.choice(ranges[2] )
lowerCAmelCase__ = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase__ = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase__ = torch.tensor(mel[None, None, :] )
lowerCAmelCase__ = torch.nn.functional.interpolate(
lowerCamelCase_ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=lowerCamelCase_ )
lowerCAmelCase__ = mel_shrink[0][0].numpy()
lowerCAmelCase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase__ = len(lowerCamelCase_ ) - max_length
lowerCAmelCase__ = np.random.randint(0 , overflow + 1 )
lowerCAmelCase__ = waveform[idx : idx + max_length]
lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters )
lowerCAmelCase__ = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
lowerCAmelCase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase__ = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCAmelCase__ = False
else:
lowerCAmelCase__ = self._random_mel_fusion(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowerCAmelCase__ = False
# only use repeat as a new possible value for padding; the audio is repeated before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase__ = int(max_length / len(lowerCamelCase_ ) )
lowerCAmelCase__ = np.stack(np.tile(lowerCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase__ = int(max_length / len(lowerCamelCase_ ) )
lowerCAmelCase__ = np.stack(np.tile(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCAmelCase__ = np.pad(lowerCamelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters )
lowerCAmelCase__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> BatchFeature:
lowerCAmelCase__ = truncation if truncation is not None else self.truncation
lowerCAmelCase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCAmelCase__ = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowerCAmelCase__ = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
lowerCAmelCase__ = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase__ = [
self._get_input_mel(lowerCamelCase_ , max_length if max_length else self.nb_max_samples , lowerCamelCase_ , lowerCamelCase_ )
for waveform in raw_speech
]
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase_ )
is_longer.append(lowerCamelCase_ )
if truncation == "fusion" and sum(lowerCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase__ = np.random.randint(0 , len(lowerCamelCase_ ) )
lowerCAmelCase__ = True
if isinstance(input_mel[0] , lowerCamelCase_ ):
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase__ = [[longer] for longer in is_longer]
lowerCAmelCase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowerCAmelCase__ = BatchFeature(lowerCamelCase_ )
if return_tensors is not None:
lowerCAmelCase__ = input_features.convert_to_tensors(lowerCamelCase_ )
return input_features | 90 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
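# Heun's method (explicit trapezoidal rule): an Euler predictor followed by an averaged-slope corrector, stepping from xa to x_end with a fixed step size.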
def _snake_case ( A , A , A , A , A ) -> np.array:
lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(A ):
lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] )
lowerCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__UpperCAmelCase = '''naver-clova-ix/donut-base'''
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DonutProcessor.from_pretrained(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowerCAmelCase__ = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowerCAmelCase__ = self.processor.tokenajson(lowerCamelCase_ )
self.assertDictEqual(lowerCamelCase_ , lowerCamelCase_ ) | 90 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict:
lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCamelCase_ ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def _snake_case ( A , A = 16 ) -> Any:
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
lowerCAmelCase__ = load_dataset('''csv''' , data_files=A )
lowerCAmelCase__ = datasets['''train'''].unique('''label''' )
lowerCAmelCase__ = {v: i for i, v in enumerate(A )}
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' )
if "label" in examples:
lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ = datasets.map(
A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 )
lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 )
return train_dataloader, eval_dataloader | 90 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=33 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> List[str]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
lowerCAmelCase__ = EsmModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = EsmForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = EsmForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = False
lowercase__ : Optional[Any] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ : int = ()
lowercase__ : Optional[int] = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ : Tuple = True
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = EsmModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = EsmModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase__ = EsmEmbeddings(config=lowerCamelCase_ )
lowerCAmelCase__ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCAmelCase__ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCAmelCase__ = create_position_ids_from_input_ids(lowerCamelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowerCamelCase_ , lowerCamelCase_ ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase__ = EsmEmbeddings(config=lowerCamelCase_ )
lowerCAmelCase__ = torch.empty(2 , 4 , 30 )
lowerCAmelCase__ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCAmelCase__ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCAmelCase__ = embeddings.create_position_ids_from_inputs_embeds(lowerCamelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowerCamelCase_ , lowerCamelCase_ ) ) )
@unittest.skip('''Esm does not support embedding resizing''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@require_torch
class a__ ( a__ ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
with torch.no_grad():
lowerCAmelCase__ = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
lowerCAmelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ = model(lowerCamelCase_ )[0]
lowerCAmelCase__ = 33
lowerCAmelCase__ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
with torch.no_grad():
lowerCAmelCase__ = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
lowerCAmelCase__ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase__ = model(lowerCamelCase_ )[0]
# compare the actual values for a slice.
lowerCAmelCase__ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4 ) ) | 90 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__UpperCAmelCase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def _snake_case ( A , A=None ) -> Optional[Any]:
require_version(deps[pkg] , A ) | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
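# Product term u*(u-1)*...*(u-(n-1)) used as the coefficient in Newton's forward-difference interpolation formula.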
def _snake_case ( A , A ) -> float:
lowerCAmelCase__ = u
for i in range(1 , A ):
lowerCAmelCase__ = temp * (u - i)
return temp
def _snake_case ( ) -> None:
lowerCAmelCase__ = int(input('''enter the number of values: ''' ) )
lowerCAmelCase__ = []
for _ in range(A ):
y.append([] )
for i in range(A ):
for j in range(A ):
y[i].append(A )
lowerCAmelCase__ = 0
print('''enter the values of parameters in a list: ''' )
lowerCAmelCase__ = list(map(A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(A ):
lowerCAmelCase__ = float(input() )
lowerCAmelCase__ = int(input('''enter the value to interpolate: ''' ) )
lowerCAmelCase__ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , A ):
for j in range(n - i ):
lowerCAmelCase__ = y[j + 1][i - 1] - y[j][i - 1]
lowerCAmelCase__ = y[0][0]
for i in range(1 , A ):
summ += (ucal(A , A ) * y[0][i]) / math.factorial(A )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main() | 90 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]:
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
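# Split each layer's fused qkv projection matrix and bias into separate query, key, and value tensors.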
def _snake_case ( A , A ) -> List[str]:
for i in range(config.num_hidden_layers ):
lowerCAmelCase__ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _snake_case ( A ) -> List[str]:
lowerCAmelCase__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
def _snake_case ( A , A , A ) -> str:
lowerCAmelCase__ = dct.pop(A )
lowerCAmelCase__ = val
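# Conversion entry point: pick the right ViLT head from the checkpoint URL, remap the state dict, verify outputs on sample inputs, and save the model and processor.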
@torch.no_grad()
def _snake_case ( A , A ) -> Any:
lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
if mlm_model:
lowerCAmelCase__ = torch.Size([1, 11, 30522] )
lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ = torch.Size([1, 3129] )
lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowerCAmelCase__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ = torch.Size([1, 2] )
lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 1 |
'''simple docstring'''
# Imports
import numpy as np
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ) -> List[Any]:
self.set_matricies(red=lowerCamelCase_ , green=lowerCamelCase_ , blue=lowerCamelCase_ , red_edge=lowerCamelCase_ , nir=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ) -> List[str]:
if red is not None:
lowerCAmelCase__ = red
if green is not None:
lowerCAmelCase__ = green
if blue is not None:
lowerCAmelCase__ = blue
if red_edge is not None:
lowerCAmelCase__ = red_edge
if nir is not None:
lowerCAmelCase__ = nir
return True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ) -> Optional[Any]:
self.set_matricies(red=lowerCamelCase_ , green=lowerCamelCase_ , blue=lowerCamelCase_ , red_edge=lowerCamelCase_ , nir=lowerCamelCase_ )
lowerCAmelCase__ = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.nir * (self.red / (self.green**2))
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return (self.nir - self.red) / (self.nir + self.red)
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return (self.nir - self.blue) / (self.nir + self.blue)
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return (self.nir - self.green) / (self.nir + self.green)
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=0.08 , lowerCamelCase_=1.22 , lowerCamelCase_=0.03 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return (self.nir / self.green) - 1
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return (self.nir / self.redEdge) - 1
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return (self.red - self.blue) / self.red
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return self.nir - self.green
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=0.16 ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green + y)
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=0.5 ) -> Union[str, Any]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None , lowerCamelCase_=None ) -> Any:
return (self.nir - b) / (a * self.red)
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return (self.red + self.green + self.blue) / 30.5
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return self.nir / self.red
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return (self.rvi() - 1) / (self.rvi() + 1)
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return self.green / (self.nir + self.red + self.green)
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.nir / (self.nir + self.red + self.green)
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return self.red / (self.nir + self.red + self.green)
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return (self.red - self.green) / (self.red + self.green)
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase__ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.nir / self.red
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return (self.ndvi() + 0.5) ** (1 / 2)
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 90 |
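# Standalone sanity check of the NDVI formula implemented above (recomputed
# directly, since the class methods are machine-renamed in this dump):
# NDVI = (NIR - red) / (NIR + red), which lies in [-1, 1] for non-negative bands.
import numpy as np

nir = np.array([0.8, 0.6, 0.5])
red = np.array([0.1, 0.2, 0.5])
ndvi = (nir - red) / (nir + red)
assert np.allclose(ndvi, [0.7 / 0.9, 0.5, 0.0])
print(ndvi)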
'''simple docstring'''
import re
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(A , A ):
return match.string == phone
return False
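# Examples of what the pattern above accepts -- an optional "+91", "0", or "91"
# prefix followed by ten digits starting with 7, 8, or 9:
#   "+918827897895" -> True    "9876543210" -> True    "1234567890" -> False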
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> Tuple:
lowerCAmelCase__ = parent
lowerCAmelCase__ = 13
lowerCAmelCase__ = 7
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 99
lowerCAmelCase__ = 32
lowerCAmelCase__ = 2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 37
lowerCAmelCase__ = '''gelu'''
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 5_12
lowerCAmelCase__ = 16
lowerCAmelCase__ = 2
lowerCAmelCase__ = 0.02
lowerCAmelCase__ = 3
lowerCAmelCase__ = 4
lowerCAmelCase__ = None
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
lowerCAmelCase__ = TFRoFormerModel(config=lowerCamelCase_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = True
lowerCAmelCase__ = TFRoFormerForCausalLM(config=lowerCamelCase_ )
lowerCAmelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ = model(lowerCamelCase_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = TFRoFormerForMaskedLM(config=lowerCamelCase_ )
lowerCAmelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFRoFormerForSequenceClassification(config=lowerCamelCase_ )
lowerCAmelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFRoFormerForMultipleChoice(config=lowerCamelCase_ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFRoFormerForTokenClassification(config=lowerCamelCase_ )
lowerCAmelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
lowerCAmelCase__ = TFRoFormerForQuestionAnswering(config=lowerCamelCase_ )
lowerCAmelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ : Optional[Any] = False
lowercase__ : List[Any] = False
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = TFRoFormerModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowerCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ = model(lowerCamelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase__ = 5_00_00
lowerCAmelCase__ = [1, 6, vocab_size]
self.assertEqual(output.shape , lowerCamelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase__ = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4 )
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = 1e-4
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = tf.constant([[4, 10]] )
lowerCAmelCase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase__ = emba(input_ids.shape )
lowerCAmelCase__ = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(lowerCamelCase_ , lowerCamelCase_ , atol=self.tolerance )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
lowerCAmelCase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
lowerCAmelCase__ = emba.weight[:3, :5]
tf.debugging.assert_near(lowerCamelCase_ , lowerCamelCase_ , atol=self.tolerance )
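# Standalone recomputation of the sinusoidal table asserted above, independent
# of TF (assumed convention: first half sine, second half cosine, base 10000);
# the row for position 1 at dim 6 matches the expected weights in the test.
import numpy as np

def sinusoid_row(position, dim):
    half = dim // 2
    freqs = position / (10000.0 ** (np.arange(half) / half))
    return np.concatenate([np.sin(freqs), np.cos(freqs)])

print(np.round(sinusoid_row(1, 6), 4))  # [0.8415 0.0464 0.0022 0.5403 0.9989 1.    ]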
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = 1e-4
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# 2,12,16,64
lowerCAmelCase__ = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
lowerCAmelCase__ = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
lowerCAmelCase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase__ = embed_positions([2, 16, 7_68] )[None, None, :, :]
lowerCAmelCase__ , lowerCAmelCase__ = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
lowerCAmelCase__ = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowerCamelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowerCamelCase_ , atol=self.tolerance ) | 90 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__UpperCAmelCase = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
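    # Illustration (hypothetical IDs): with cls_token_id=0 and eos_token_id=2,
    # a single sequence [5, 6] becomes [0, 5, 6, 2]; a pair ([5, 6], [7]) becomes
    # [0, 5, 6, 2, 7, 2] -- ESM has no <sep>, so <eos> doubles as the separator.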
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A , A = None , A = None , A = False , ) -> tuple[int, float, str]:
lowerCAmelCase__ = cipher_alphabet or [chr(A ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCAmelCase__ = {
'''a''': 0.08_497,
'''b''': 0.01_492,
'''c''': 0.02_202,
'''d''': 0.04_253,
'''e''': 0.11_162,
'''f''': 0.02_228,
'''g''': 0.02_015,
'''h''': 0.06_094,
'''i''': 0.07_546,
'''j''': 0.00_153,
'''k''': 0.01_292,
'''l''': 0.04_025,
'''m''': 0.02_406,
'''n''': 0.06_749,
'''o''': 0.07_507,
'''p''': 0.01_929,
'''q''': 0.00_095,
'''r''': 0.07_587,
'''s''': 0.06_327,
'''t''': 0.09_356,
'''u''': 0.02_758,
'''v''': 0.00_978,
'''w''': 0.02_560,
'''x''': 0.00_150,
'''y''': 0.01_994,
'''z''': 0.00_077,
}
else:
# Custom frequencies dictionary
lowerCAmelCase__ = frequencies_dict
if not case_sensitive:
lowerCAmelCase__ = ciphertext.lower()
# Chi squared statistic values
lowerCAmelCase__ = {}
# cycle through all of the shifts
for shift in range(len(A ) ):
lowerCAmelCase__ = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCAmelCase__ = (alphabet_letters.index(letter.lower() ) - shift) % len(
A )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCAmelCase__ = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCAmelCase__ = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCAmelCase__ = decrypted_with_shift.lower().count(A )
                    # Get the expected amount of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCAmelCase__ = decrypted_with_shift.count(A )
                    # Get the expected amount of times the letter should appear based
# on letter frequencies
lowerCAmelCase__ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCAmelCase__ = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCAmelCase__ = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(A ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCAmelCase__ = min(
A , key=A , )
# Get all the data from the most likely cipher (key, decoded message)
    lowerCAmelCase__ , lowerCAmelCase__ = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
) | 90 |
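# Minimal illustration of the chi-squared scoring idea above, in the standard
# form with expected = frequency * text length (the decoder above scales by the
# per-letter occurrence count instead): lower scores look more like English.
english = {'''e''': 0.11_162, '''t''': 0.09_356, '''z''': 0.00_077}
text = '''tttteee'''
score = sum(
    (text.count(ch) - freq * len(text)) ** 2 / (freq * len(text))
    for ch, freq in english.items()
)
print(round(score, 2))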
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AltDiffusionPipeline
lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : int = ["input_ids", "attention_mask"]
lowercase__ : Optional[int] = RobertaTokenizer
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="replace" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_=False , lowerCamelCase_=True , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(
lowerCamelCase_ , lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase_ ) != add_prefix_space:
lowerCAmelCase__ = getattr(lowerCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = pre_tok_class(**lowerCamelCase_ )
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = '''post_processor'''
lowerCAmelCase__ = getattr(self.backend_tokenizer , lowerCamelCase_ , lowerCamelCase_ )
if tokenizer_component_instance:
lowerCAmelCase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase__ = tuple(state['''sep'''] )
if "cls" in state:
lowerCAmelCase__ = tuple(state['''cls'''] )
lowerCAmelCase__ = False
if state.get('''add_prefix_space''' , lowerCamelCase_ ) != add_prefix_space:
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = True
if state.get('''trim_offsets''' , lowerCamelCase_ ) != trim_offsets:
lowerCAmelCase__ = trim_offsets
lowerCAmelCase__ = True
if changes_to_apply:
lowerCAmelCase__ = getattr(lowerCamelCase_ , state.pop('''type''' ) )
lowerCAmelCase__ = component_class(**lowerCamelCase_ )
setattr(self.backend_tokenizer , lowerCamelCase_ , lowerCamelCase_ )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else value
lowerCAmelCase__ = value
def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> BatchEncoding:
lowerCAmelCase__ = kwargs.get('''is_split_into_words''' , lowerCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> BatchEncoding:
lowerCAmelCase__ = kwargs.get('''is_split_into_words''' , lowerCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
lowerCAmelCase__ = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None ) -> int:
lowerCAmelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 90 |
'''simple docstring'''
def _snake_case ( A , A ) -> int:
return x if y == 0 else greatest_common_divisor(A , x % y )
def _snake_case ( A , A ) -> int:
return (x * y) // greatest_common_divisor(A , A )
def _snake_case ( A = 20 ) -> int:
lowerCAmelCase__ = 1
for i in range(1 , n + 1 ):
lowerCAmelCase__ = lcm(A , A )
return g
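# Worked example of the fold above: lcm(4, 6) = 4 * 6 // gcd(4, 6) = 12, and
# folding lcm over 1..10 yields 2520 (the classic Project Euler 5 answer);
# the function above does the same for 1..20.
from math import gcd as _gcd

_acc = 1
for _i in range(1, 11):
    _acc = _acc * _i // _gcd(_acc, _i)
assert _acc == 2_520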
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 |
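# Sketch of the lazy-import pattern used above in plain stdlib terms
# (hypothetical helper; transformers' _LazyModule adds caching and more):
# attributes resolve to their backing module only on first access.
import importlib

class LazyNamespace:
    def __init__(self, import_structure):
        self._map = {name: mod for mod, names in import_structure.items() for name in names}

    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])
        return getattr(module, name)

lazy = LazyNamespace({'''math''': ['''sqrt'''], '''json''': ['''dumps''']})
print(lazy.sqrt(9.0), lazy.dumps({'''lazy''': True}))  # modules load only when touched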
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _snake_case ( A , A=None , A=None , A=None ) -> Union[str, Any]:
lowerCAmelCase__ = True
while ask_again:
lowerCAmelCase__ = input(A )
try:
if default is not None and len(A ) == 0:
return default
return convert_value(A ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(A )
def _snake_case ( A , A=[] , A=None , A=0 ) -> List[Any]:
lowerCAmelCase__ = BulletMenu(A , A )
lowerCAmelCase__ = menu.run(default_choice=A )
return convert_value(A ) if convert_value is not None else result
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _snake_case ( A ) -> str:
lowerCAmelCase__ = int(A )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _snake_case ( A ) -> List[str]:
return {"yes": True, "no": False}[value.lower()]
class a__ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = usage.replace('''<command> [<args>] ''' , '''''' )
return usage | 90 | 1 |
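# Minimal demo of the usage-rewriting trick above with a plain parser
# (hypothetical CLI strings; accelerate wires this into its real subcommands):
import argparse

class _StripUsageFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        text = super()._format_usage(usage, actions, groups, prefix)
        return text.replace('''<command> [<args>] ''', '''''')

parser = argparse.ArgumentParser(
    prog='''demo''', usage='''demo <command> [<args>] [--dry-run]''', formatter_class=_StripUsageFormatter
)
print(parser.format_usage())  # usage: demo [--dry-run]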
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> List[str]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]:
lowerCAmelCase__ = BioGptForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) -> str:
lowerCAmelCase__ = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# create attention mask
lowerCAmelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
lowerCAmelCase__ = self.seq_length // 2
lowerCAmelCase__ = 0
# first forward pass
lowerCAmelCase__ , lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCAmelCase__ = ids_tensor((1,) , lowerCamelCase_ ).item() + 1
lowerCAmelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCAmelCase__ = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase_ )] , dim=1 , )
# get two different outputs
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )['''last_hidden_state''']
lowerCAmelCase__ = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )['''last_hidden_state''']
# select random slice
lowerCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = BioGptModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval()
lowerCAmelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
# first forward pass
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
lowerCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )['''last_hidden_state''']
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ )[
'''last_hidden_state'''
]
# select random slice
lowerCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , lowerCamelCase_=False ) -> List[Any]:
lowerCAmelCase__ = BioGptForCausalLM(lowerCamelCase_ )
model.to(lowerCamelCase_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , *lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = BioGptModel(lowerCamelCase_ )
lowerCAmelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) -> str:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = BioGptForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self ):
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_attention_mask_past( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
    def test_biogpt_gradient_checkpointing( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
    def test_biogpt_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
    def test_biogpt_weight_initialization( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
    def test_biogpt_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
    def test_batch_generation( self ):
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        tokenizer.padding_side = '''left'''
        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        inputs = tokenizer(sentences , return_tensors='''pt''' , padding=True )
        input_ids = inputs['''input_ids'''].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs['''attention_mask'''].to(torch_device ) , )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained( self ):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_lm_head_model( self ):
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        input_ids = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
        output = model(input_ids )[0]
        vocab_size = 4_23_84
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_biogpt_generation( self ):
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(torch_device )
        output_ids = model.generate(
            **tokenized , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        expected_output_str = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
        )
        self.assertEqual(output_str , expected_output_str ) | 90 |
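The batch-generation test above depends on left padding: with a decoder-only model, all prompts in a batch must end at the same position, so shorter prompts are padded on the left and masked out. A minimal, self-contained sketch of that setup (plain torch, no model; the helper name is illustrative):

import torch

def left_pad(sequences, pad_id):
    # pad each sequence on the left to the longest length and build the matching attention mask
    max_len = max(len(s) for s in sequences)
    input_ids = torch.full((len(sequences), max_len), pad_id, dtype=torch.long)
    attention_mask = torch.zeros((len(sequences), max_len), dtype=torch.long)
    for i, seq in enumerate(sequences):
        input_ids[i, max_len - len(seq):] = torch.tensor(seq)
        attention_mask[i, max_len - len(seq):] = 1
    return input_ids, attention_mask

ids, mask = left_pad([[5, 6, 7], [8]], pad_id=0)
print(ids.tolist())   # [[5, 6, 7], [0, 0, 8]]
print(mask.tolist())  # [[1, 1, 1], [0, 0, 1]]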
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
    '''simple docstring'''
    latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 2_56 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.18_215 , norm_type = "group" , ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x , return_dict = True ) -> VQEncoderOutput:
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == '''spatial''' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec ) | 90 | 1 |
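A quick smoke test of the VQ autoencoder above, assuming it is used in place of diffusers' VQModel (the relative imports mean this only runs inside the diffusers package). Encode to latents, then decode back; the input size is arbitrary:

import torch

model = VQModel()  # default config: 3 channels in/out, a single 64-channel block
model.eval()
with torch.no_grad():
    sample = torch.randn(1, 3, 32, 32)        # (batch, channels, height, width)
    latents = model.encode(sample).latents    # output of quant_conv, pre-quantization
    reconstruction = model.decode(latents).sample
print(latents.shape, reconstruction.shape)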
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
    parser.add_argument(
        '''--validation_file''' , type=str , default=None , help='''A csv or a json file containing the validation data.''' )
    parser.add_argument(
        '''--max_length''' , type=int , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
    parser.add_argument(
        '''--num_beams''' , type=int , default=None , help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ) , )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=True , )
    parser.add_argument(
        '''--config_name''' , type=str , default=None , help='''Pretrained config name or path if not the same as model_name''' , )
    parser.add_argument(
        '''--device''' , type=str , default='''cpu''' , help='''Device where the model will be run''' , )
    parser.add_argument('''--output_file_path''' , type=str , default=None , help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
    return args
def _snake_case ( A , A="cpu" ) -> int:
lowerCAmelCase__ = model_dict[model_name].from_pretrained(A ).to(A )
lowerCAmelCase__ = tokenizer_dict[model_name].from_pretrained(A )
if model_name in ["facebook/bart-base"]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = 0
return huggingface_model, tokenizer
def export_and_validate_model(model , tokenizer , onnx_file_path , num_beams , max_length ):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device )
        summary_ids = model.generate(
            inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            } , example_outputs=summary_ids , )
        logger.info('''Model exported to {}'''.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams ),
                '''max_length''': np.array(max_length ),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
        logger.info('''Success.''' )
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main() | 90 |
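Once the script has run, the exported graph can be consumed from any ONNX Runtime session. A sketch, assuming the default BART.onnx output path; the input names match the ones declared in torch.onnx.export above, and the token ids are placeholders:

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession("BART.onnx")
ort_out = sess.run(
    None,
    {
        "input_ids": np.array([[0, 31414, 2]], dtype=np.int64),
        "attention_mask": np.array([[1, 1, 1]], dtype=np.int64),
        "num_beams": np.array(4, dtype=np.int64),
        "max_length": np.array(5, dtype=np.int64),
        "decoder_start_token_id": np.array(2, dtype=np.int64),
    },
)
print(ort_out[0])  # generated token ids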
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid , row , column , n ) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid ) -> tuple[int, int] | None:
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 1 |
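A small helper, not part of the original module, that verifies a completed grid using the same is_safe constraint the solver enforces (each cell is cleared temporarily so it does not collide with itself):

def is_complete_and_valid(grid) -> bool:
    for row in range(9):
        for column in range(9):
            digit = grid[row][column]
            if digit == 0:
                return False
            grid[row][column] = 0  # clear the cell so is_safe ignores it
            safe = is_safe(grid, row, column, digit)
            grid[row][column] = digit
            if not safe:
                return False
    return True

# e.g. is_complete_and_valid(sudoku(initial_grid)) is True for the solvable grid above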
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
def convert_dialogpt_checkpoint(checkpoint_path , pytorch_dump_folder_path ):
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
) | 90 |
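The whole conversion is a single key rename inside the pickled state dict; here is the same operation on a toy dict, for clarity (the tensor value is a dummy):

import torch

state = {"lm_head.decoder.weight": torch.zeros(2, 2)}
state["lm_head.weight"] = state.pop("lm_head.decoder.weight")
print(sorted(state))  # ['lm_head.weight']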
'''simple docstring'''
def fibonacci(n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 1 |
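Two sanity checks for the helpers above (this is the classic "index of the first n-digit Fibonacci number" problem; note the module indexes the sequence from 0):

print(fibonacci(7))               # 13
print(fibonacci_digits_index(3))  # 12, since the 12th term, 144, is the first with 3 digits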
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict['''bool_masked_pos'''] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict['''output_attentions'''] = True
                inputs_dict['''output_hidden_states'''] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
        pass
def prepare_video():
    file_path = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file_path )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification( self ):
        model = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 4_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_for_pretraining( self ):
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors='''pt''' ).to(torch_device )
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        inputs['''bool_masked_pos'''] = torch.load(local_path )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size([1, 14_08, 15_36] )
        expected_slice = torch.tensor(
            [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=torch_device )
        self.assertEqual(outputs.logits.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5_142] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor([0.6_469] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) ) | 90 |
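Every pre-training path above builds the same kind of boolean patch mask: a fixed number of leading True positions, repeated across the batch. The construction in isolation (values are arbitrary):

import torch

seq_length, num_masks, batch_size = 8, 3, 2
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos[0].tolist())
# [True, True, True, False, False, False, False, False]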
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst ) -> int:
    return choice(lst )
def kth_number(lst , k ) -> int:
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
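kth_number returns the k-th smallest element (1-indexed) in expected linear time; because elements equal to the pivot are dropped by both comprehensions, the input is assumed to hold distinct values:

print(kth_number([2, 1, 3, 4, 5], 3))  # 3
print(kth_number([9, 7, 5, 3, 1], 1))  # 1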
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds , labels ) -> float:
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ) -> dict:
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds , labels ) -> dict:
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' ) | 90 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 1 |
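With the lazy-module setup above, importing a config is always cheap, while the model classes are only resolved (and only exist) when torch is available. A sketch of the user-facing behavior, assuming an installed transformers:

from transformers.models.clipseg import CLIPSegConfig  # light import, no torch needed

config = CLIPSegConfig()
# from transformers.models.clipseg import CLIPSegModel  # resolved lazily; requires torch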
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module ):
    '''simple docstring'''
    def __init__( self , class_size , embed_size ) -> None:
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def forward( self , hidden_state ):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state )
        return logits | 90 |
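A forward-pass sketch for the single-layer head above; the dimensions are arbitrary and only need to agree with embed_size:

import torch

head = ClassificationHead(class_size=5, embed_size=16)
hidden_state = torch.randn(2, 16)  # (batch, embed_size)
logits = head(hidden_state)
print(logits.shape)  # torch.Size([2, 5])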
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
    '''simple docstring'''
    key: KEY
    val: VAL
class _DeletedItem(_Item ):
    '''simple docstring'''
    def __init__( self ) -> None:
        super().__init__(None , None )
    def __bool__( self ) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL] ):
    '''simple docstring'''
    def __init__( self , initial_block_size = 8 , capacity_factor = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key ) -> int:
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind ) -> int:
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind , key , val ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key , val ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key , val ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ) -> int:
        return self._len
    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item )
    def __repr__( self ) -> str:
        val_string = ''' ,'''.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item )
        return F"""HashMap({val_string})""" | 90 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool ):
    '''simple docstring'''
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode( self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(F"""{src_lang} is not a supported language.""" )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"""{tgt_lang} is not a supported language.""" )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='''pt''' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs ):
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True ) | 90 |
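Driving the tool directly looks roughly like this (a sketch; the first call downloads the 600M NLLB checkpoint, which is the expensive step, and the language names must be keys of LANGUAGE_CODES):

from transformers.tools import TranslationTool

translator = TranslationTool()
print(translator("My name is Wolfgang and I live in Berlin.", src_lang="English", tgt_lang="French"))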
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = OmegaConf.load(A )
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model''']
lowerCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''first_stage_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''model.diffusion_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
lowerCAmelCase__ = config.model.params.first_stage_config.params
lowerCAmelCase__ = config.model.params.unet_config.params
lowerCAmelCase__ = VQModel(**A ).eval()
vqvae.load_state_dict(A )
lowerCAmelCase__ = UNetLDMModel(**A ).eval()
unet.load_state_dict(A )
lowerCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , )
lowerCAmelCase__ = LDMPipeline(A , A , A )
pipeline.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
__UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 1 |
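# The script above carves sub-state-dicts out of one monolithic checkpoint by key prefix.
# A standalone sketch of that idiom on toy data (the keys here are illustrative only):
def extract_sub_state_dict(state_dict , prefix ):
    # keep only the keys under `prefix`, stripping it so the sub-model can load them directly
    return {key[len(prefix ):]: value for key, value in state_dict.items() if key.startswith(prefix )}
assert extract_sub_state_dict({'''first_stage_model.enc.w''': 0, '''unet.b.w''': 1} , '''first_stage_model.''' ) == {'''enc.w''': 0}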
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=a__ ):
'''simple docstring'''
lowercase__ : List[str] = ["transformers", "torch", "note_seq"]
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Any:
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *lowerCamelCase_ , **lowerCamelCase_ ) -> Tuple:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[int]:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) | 90 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = v.to_dict()
return d | 90 | 1 |
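# Why the `to_dict` override above re-serializes values: a GenerationConfig stored on the
# dataclass is itself an object, so JSON logging needs it flattened into a plain dict.
# Minimal sketch of that flattening step (the field values are assumptions):
gen_config = GenerationConfig(max_length=1_28 , num_beams=4 )
flattened = {k: (v.to_dict() if isinstance(v , GenerationConfig ) else v) for k, v in {'''generation_config''': gen_config}.items()}
assert isinstance(flattened['''generation_config'''] , dict )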
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
__UpperCAmelCase = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
__UpperCAmelCase = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
__UpperCAmelCase = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
__UpperCAmelCase = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[int]:
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0.9 , lowerCamelCase_=3 , lowerCamelCase_=0.5 ) -> Dict:
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowerCAmelCase__ = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase_ ) , word_tokenize(lowerCamelCase_ ) , alpha=lowerCamelCase_ , beta=lowerCamelCase_ , gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ , lowerCamelCase_ )
]
else:
lowerCAmelCase__ = [
meteor_score.single_meteor_score(lowerCamelCase_ , lowerCamelCase_ , alpha=lowerCamelCase_ , beta=lowerCamelCase_ , gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ , lowerCamelCase_ )
]
return {"meteor": np.mean(lowerCamelCase_ )} | 90 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 90 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__UpperCAmelCase = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ) -> list:
    # creates `shape[0]` rows of `shape[1]` random floats scaled by `scale`
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
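# Quick sanity demo of the helper above (a seeded rng makes it deterministic): each call
# returns `shape[0]` rows of `shape[1]` floats drawn from [0, scale).
_demo = floats_list((2, 3) , scale=2.0 , rng=random.Random(0 ) )
assert len(_demo ) == 2 and all(len(row ) == 3 for row in _demo )
assert all(0.0 <= v < 2.0 for row in _demo for v in row )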
@require_torch
@require_torchaudio
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=4_00 , lowerCamelCase_=20_00 , lowerCamelCase_=24 , lowerCamelCase_=24 , lowerCamelCase_=0.0 , lowerCamelCase_=1_60_00 , lowerCamelCase_=True , lowerCamelCase_=True , ) -> Any:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = min_seq_length
lowerCAmelCase__ = max_seq_length
lowerCAmelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ = feature_size
lowerCAmelCase__ = num_mel_bins
lowerCAmelCase__ = padding_value
lowerCAmelCase__ = sampling_rate
lowerCAmelCase__ = return_attention_mask
lowerCAmelCase__ = do_normalize
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False , lowerCamelCase_=False ) -> List[str]:
def _flatten(lowerCamelCase_ ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
lowerCAmelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = SpeechaTextFeatureExtractor if is_speech_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = SpeechaTextFeatureExtractionTester(self )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase__ = np.asarray(lowerCamelCase_ )
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then truncate to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=16 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest, not max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
import torch
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = np.random.rand(1_00 , 32 ).astype(np.floataa )
lowerCAmelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCAmelCase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
from datasets import load_dataset
lowerCAmelCase__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowerCAmelCase__ = ds.sort('''id''' ).select(range(lowerCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# fmt: off
lowerCAmelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
lowerCAmelCase__ = self._load_datasamples(1 )
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) ) | 90 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(num ) -> bool:
    digits = str(num )
    return len(digits ) == 9 and set(digits ) == set('''123456789''' )
def solution() -> int | None:
    # For a 4-digit base n (9999..5000), 2n has 5 digits, so the concatenated product
    # n||2n equals n * 10**5 + 2 * n = 100002 * n.
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    # For a 3-digit base n (333..100), 2n and 3n each have 3 digits, so the concatenated
    # product n||2n||3n equals n * 10**6 + 2 * n * 10**3 + 3 * n = 1002003 * n.
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
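# Brute-force cross-check sketch (an addition, not part of the original solution): enumerate
# the concatenated product of (1, 2, ..., k) for every base below 10**4 and keep the
# pandigital maximum; it should agree with the closed-form search above.
def brute_force_check() -> int:
    best = 0
    for base in range(1 , 10_000 ):
        concat = ''''''
        k = 1
        while len(concat ) < 9:
            concat += str(base * k )
            k += 1
        if k > 2 and len(concat ) == 9 and is_9_pandigital(int(concat ) ):
            best = max(best , int(concat ) )
    return best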
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = '''pt'''
elif is_tf_available():
__UpperCAmelCase = '''tf'''
else:
__UpperCAmelCase = '''jax'''
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[Any] = ByTaTokenizer
lowercase__ : Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
super().setUp()
lowerCAmelCase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=20 , lowerCamelCase_=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase__ = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCAmelCase__ = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase__ = list(filter(lambda lowerCamelCase_ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , lowerCamelCase_ ) )
lowerCAmelCase__ = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase_ ) , lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCAmelCase__ = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCAmelCase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCAmelCase__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCAmelCase__ = ''' ''' + output_txt
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
lowerCAmelCase__ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = '''Unicode €.'''
lowerCAmelCase__ = tokenizer(lowerCamelCase_ )
lowerCAmelCase__ = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_ )
# decoding
lowerCAmelCase__ = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , '''Unicode €.</s>''' )
lowerCAmelCase__ = tokenizer('''e è é ê ë''' )
lowerCAmelCase__ = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_ )
# decoding
lowerCAmelCase__ = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase__ = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCAmelCase__ = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCamelCase_ )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertNotIn('''decoder_input_ids''' , lowerCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase__ = tokenizer(
text_target=lowerCamelCase_ , max_length=32 , padding='''max_length''' , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ['''A long paragraph for summarization. </s>''']
lowerCAmelCase__ = ['''Summary of the text. </s>''']
# fmt: off
lowerCAmelCase__ = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
lowerCAmelCase__ = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , text_target=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch['''input_ids'''][0] )
self.assertEqual(lowerCamelCase_ , batch['''labels'''][0] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCAmelCase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase__ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
lowerCAmelCase__ = [F"""<extra_id_{i}>""" for i in range(1_25 )]
lowerCAmelCase__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ = tokenizer_class.from_pretrained(
lowerCamelCase_ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCamelCase_ )]
lowerCAmelCase__ = tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_class.from_pretrained(lowerCamelCase_ )
self.assertTrue(tokenizer.decode([2_55] ) == '''''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
lowerCAmelCase__ = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowerCAmelCase__ = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCAmelCase__ = 0
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
for attr in attributes_list:
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [] )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] ) | 90 |
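# The byte-level scheme these tests exercise, reconstructed from the expected ids above: each
# UTF-8 byte maps to byte_value + 3 (ids 0-2 are reserved for pad/eos/unk) and the eos id 1
# (`</s>`) is appended. A sketch only; the real tokenizer additionally handles special tokens.
def byta_encode_sketch(text ):
    return [b + 3 for b in text.encode('''utf-8''' )] + [1]
assert byta_encode_sketch('''Unicode €.''' ) == [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]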
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this to taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to
# ~3MB in total for all files. That latter approach is implemented by `fsmt-make-tiny-model.py`.
#
# The result will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(vocab, range(len(vocab))))
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = Path(tmpdirname)
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 1 |
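# Hypothetical smoke test after the upload step above (requires the checkpoint to actually
# exist on the Hub under this name): the tiny model should load and generate end to end.
tiny_tokenizer = FSMTTokenizer.from_pretrained('''stas/tiny-wmt19-en-ru''')
tiny_loaded = FSMTForConditionalGeneration.from_pretrained('''stas/tiny-wmt19-en-ru''')
print(tiny_loaded.generate(**tiny_tokenizer(['''Making tiny model'''], return_tensors='''pt''')).shape)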
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = BertGenerationTokenizer
lowercase__ : Optional[int] = False
lowercase__ : Tuple = True
def __SCREAMING_SNAKE_CASE ( self ) -> int:
super().setUp()
lowerCAmelCase__ = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = '''<s>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = '''Hello World!'''
lowerCAmelCase__ = [1_85_36, 22_60, 1_01]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase__ = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase__ = ''' '''.join(lowerCamelCase_ )
lowerCAmelCase__ = self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = BertGenerationConfig()
lowerCAmelCase__ = BertGenerationEncoder(lowerCamelCase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , ) | 90 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory() -> None:
    raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module ):
    '''simple docstring'''
    def __init__( self ) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowerCAmelCase__ , lowerCAmelCase__ = mock_training_loop_function('''hello''' )
self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase_ ):
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase_ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function(1_28 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase_ ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = torch.cuda.memory_allocated()
lowerCAmelCase__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_ )
lowerCAmelCase__ = release_memory(lowerCamelCase_ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_ ) | 90 | 1 |
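# Typical real-world shape of the decorator exercised above: wrap the training entry point so
# a CUDA OOM automatically retries with half the batch size. Minimal runnable sketch; the
# dataloader and epoch loop are stand-ins, not part of the original tests.
def build_dataloader(batch_size ):
    return range(batch_size ) # stand-in for a real DataLoader
def train_one_epoch(loader ):
    pass # stand-in for the real loop; a CUDA OOM raised here would trigger a retry
@find_executable_batch_size(starting_batch_size=1_28 )
def training_function(batch_size ):
    train_one_epoch(build_dataloader(batch_size ) )
training_function()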
'''simple docstring'''
from __future__ import annotations
class IIRFilter :
    '''simple docstring'''
    def __init__( self , order ) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs , b_coeffs ) -> None:
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}"""
            )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}"""
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample ) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the histories by one sample and store the newest values at index 0
        self.input_history = [sample, *self.input_history[:-1]]
        self.output_history = [result, *self.output_history[:-1]]
        return result
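# Usage sketch (the coefficients are an assumption for illustration, not part of the original):
# a first-order low-pass y[n] = alpha * x[n] + (1 - alpha) * y[n-1], i.e. in the class's terms
# b = [alpha, 0.0] and a = [1.0, -(1 - alpha)].
if __name__ == "__main__":
    alpha = 0.1
    lowpass = IIRFilter(1 )
    lowpass.set_coefficients([1.0, -(1.0 - alpha)] , [alpha, 0.0] )
    print([round(lowpass.process(1.0 ) , 4 ) for _ in range(5 )] ) # step response creeping toward 1.0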
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCAmelCase = logging.getLogger(__name__)
def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]:
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCAmelCase__ = []
# custom device map
if isinstance(A , A ) and len(device_map.keys() ) > 1:
lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase__ = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
lowerCAmelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
lowerCAmelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase__ = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase__ = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
lowerCAmelCase__ = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase__ = True
lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]:
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase__ = {}
lowerCAmelCase__ = special_dtypes
lowerCAmelCase__ = no_split_module_classes
lowerCAmelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase__ = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
lowerCAmelCase__ = max_memory
lowerCAmelCase__ = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def _snake_case ( A , A , A=None , A=None ) -> Any:
if modules_to_not_convert is None:
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]:
lowerCAmelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase__ = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase__ = '''.'''.join(A )
lowerCAmelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear`` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCAmelCase__ = module.weight.data
if module.bias is not None:
lowerCAmelCase__ = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
lowerCAmelCase__ = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
lowerCAmelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _snake_case ( A ) -> Tuple:
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager
lowerCAmelCase__ = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase__ = sum(A , [] )
lowerCAmelCase__ = len(A ) > 0
# Check if it is a base model
lowerCAmelCase__ = False
if hasattr(A , '''base_model_prefix''' ):
lowerCAmelCase__ = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase__ = list(model.named_children() )
lowerCAmelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase__ = set(A ) - set(A )
lowerCAmelCase__ = list(set(A ) ) + list(A )
# remove ".weight" from the keys
lowerCAmelCase__ = ['''.weight''', '''.bias''']
lowerCAmelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase__ = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
def _snake_case ( A ) -> Optional[int]:
for m in model.modules():
if isinstance(A , bnb.nn.Linearabit ):
return True
return False
def _snake_case ( A ) -> Union[str, Any]:
return next(parameter.parameters() ).device
def _snake_case ( A , A , A , A , A , A , A ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
lowerCAmelCase__ = param_name
lowerCAmelCase__ = model
if "." in tensor_name:
lowerCAmelCase__ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCAmelCase__ = getattr(A , A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
lowerCAmelCase__ = new_module
lowerCAmelCase__ = splits[-1]
# offload weights
lowerCAmelCase__ = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) ) | 90 | 1 |
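# A minimal sketch of the recursive module walk that `_replace_with_bnb_layers`
# performs above: visit named children, build the dotted module path, and skip
# anything listed in `modules_to_not_convert` (the actual bitsandbytes swap is
# omitted here; only the traversal and skip logic are shown).
import torch.nn as nn
def walk(module, skip, path=()):
    for name, child in module.named_children():
        dotted = ".".join(path + (name,))
        if isinstance(child, nn.Linear) and dotted not in skip:
            print("would quantize", dotted)
        walk(child, skip, path + (name,))
walk(nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.Linear(4, 2))), skip={"1.0"})
# prints: would quantize 0   ("1.0" is skipped)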
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')
@dataclass(frozen=a__ , slots=a__ )
class a__ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowercase__ : KEY
lowercase__ : VAL
class a__ ( _Item ):
'''simple docstring'''
def __init__( self ) -> None:
super().__init__(None , None )
def __bool__( self ) -> bool:
return False
__UpperCAmelCase = _DeletedItem()
class a__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None:
lowerCAmelCase__ = initial_block_size
lowerCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ = capacity_factor
lowerCAmelCase__ = 0
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return hash(lowerCamelCase_ ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
lowerCAmelCase__ = self._buckets[ind]
if not stored:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self._buckets
lowerCAmelCase__ = [None] * new_size
lowerCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]:
lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
break
def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_ , lowerCamelCase_ )
def __delitem__( self , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , lowerCamelCase_ ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
lowerCAmelCase__ = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})""" | 90 |
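# The class above is an open-addressing hash map: linear probing on collision
# and a tombstone sentinel (`_deleted`) for removals. The probing idea in
# isolation, assuming a fixed-size table with no resizing or deletion:
def probe_insert(buckets, key, val):
    n = len(buckets)
    ind = hash(key) % n
    for _ in range(n):
        if buckets[ind] is None or buckets[ind][0] == key:
            buckets[ind] = (key, val)  # empty slot, or overwrite same key
            return ind
        ind = (ind + 1) % n  # collision: step to the next slot
    raise RuntimeError("table is full")
table = [None] * 8
probe_insert(table, "a", 1)
probe_insert(table, "b", 2)
print(sorted(slot for slot in table if slot))  # [('a', 1), ('b', 2)]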
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _snake_case ( A , A , A , A , A ) -> np.array:
lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(A ):
lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] )
lowerCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
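# The routine above is the modified Euler (Heun) method: an Euler predictor
# followed by a trapezoidal corrector. One hand-checked step for y' = y,
# y(0) = 1, h = 0.1 (exact answer exp(0.1) ~= 1.10517):
def heun_step(f, x, y, h):
    y_predict = y + h * f(x, y)  # Euler predictor
    return y + (h / 2) * (f(x, y) + f(x + h, y_predict))  # trapezoidal corrector
print(heun_step(lambda x, y: y, 0.0, 1.0, 0.1))  # 1.105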
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 |
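# The `_LazyModule` indirection above keeps `import` cheap by resolving names
# only on first attribute access. A minimal sketch of the same idea using the
# stdlib alone, via module-level __getattr__ (PEP 562); the attribute table
# below is illustrative, not the transformers mechanism itself:
import importlib
import sys
_LAZY = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module
def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
print(getattr(sys.modules[__name__], "sqrt")(9.0))  # 3.0, imported lazily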
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict:
lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCamelCase_ ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def _snake_case ( A , A = 16 ) -> Any:
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
lowerCAmelCase__ = load_dataset('''csv''' , data_files=A )
lowerCAmelCase__ = datasets['''train'''].unique('''label''' )
lowerCAmelCase__ = {v: i for i, v in enumerate(A )}
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' )
if "label" in examples:
lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ = datasets.map(
A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 )
lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 )
return train_dataloader, eval_dataloader | 90 | 1 |
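# The collate_fn above pads to a fixed max_length on TPU (static shapes are
# compiled once) and to the longest sequence in the batch elsewhere. The two
# padding modes in isolation, with plain token-id lists:
def pad_batch(seqs, pad_id=0, max_length=None):
    target = max_length or max(len(s) for s in seqs)
    return [s + [pad_id] * (target - len(s)) for s in seqs]
print(pad_batch([[5, 6, 7], [8]]))                # [[5, 6, 7], [8, 0, 0]]
print(pad_batch([[5, 6, 7], [8]], max_length=5))  # [[5, 6, 7, 0, 0], [8, 0, 0, 0, 0]]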
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__UpperCAmelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _snake_case ( A , A , A , A , A ) -> Optional[int]:
for attribute in key.split('''.''' ):
lowerCAmelCase__ = getattr(A , A )
if weight_type is not None:
lowerCAmelCase__ = getattr(A , A ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "running_mean":
lowerCAmelCase__ = value
elif weight_type == "running_var":
lowerCAmelCase__ = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase__ = value
elif weight_type == "inv_freq":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( A , A , A ) -> int:
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , )
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(A )[0].split('''.''' )[-2]
lowerCAmelCase__ = mapped_key.replace('''*''' , A )
if "pos_bias_u" in name:
lowerCAmelCase__ = None
elif "pos_bias_v" in name:
lowerCAmelCase__ = None
elif "weight_g" in name:
lowerCAmelCase__ = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase__ = '''weight_v'''
elif "bias" in name:
lowerCAmelCase__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = '''weight'''
elif "running_mean" in name:
lowerCAmelCase__ = '''running_mean'''
elif "inv_freq" in name:
lowerCAmelCase__ = '''inv_freq'''
elif "running_var" in name:
lowerCAmelCase__ = '''running_var'''
elif "num_batches_tracked" in name:
lowerCAmelCase__ = '''num_batches_tracked'''
else:
lowerCAmelCase__ = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( A , A , A , A , A ) -> Optional[Any]:
lowerCAmelCase__ = full_name.split('''conv_layers.''' )[-1]
lowerCAmelCase__ = name.split('''.''' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A )
@torch.no_grad()
def _snake_case ( A , A , A=None , A=None , A=True ) -> str:
if config_path is not None:
lowerCAmelCase__ = WavaVecaConformerConfig.from_pretrained(A , hidden_act='''swish''' )
else:
lowerCAmelCase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCAmelCase__ = '''rotary'''
if is_finetuned:
if dict_path:
lowerCAmelCase__ = Dictionary.load(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ = target_dict.pad_index
lowerCAmelCase__ = target_dict.bos_index
lowerCAmelCase__ = target_dict.eos_index
lowerCAmelCase__ = len(target_dict.symbols )
lowerCAmelCase__ = os.path.join(A , '''vocab.json''' )
if not os.path.isdir(A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A ) )
return
os.makedirs(A , exist_ok=A )
lowerCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
with open(A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(A , A )
lowerCAmelCase__ = WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A , )
lowerCAmelCase__ = True if config.feat_extract_norm == '''layer''' else False
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
lowerCAmelCase__ = WavaVecaConformerForCTC(A )
else:
lowerCAmelCase__ = WavaVecaConformerForPreTraining(A )
if is_finetuned:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowerCAmelCase__ = argparse.Namespace(task='''audio_pretraining''' )
lowerCAmelCase__ = fairseq.tasks.setup_task(A )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
lowerCAmelCase__ = model[0].eval()
recursively_load_weights(A , A , not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 90 |
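# The conversion above renames fairseq keys into HF names via a MAPPING table,
# expanding the "*" placeholder with the layer index parsed from the source
# key. The renaming idea in isolation (the mapping entry below is made up):
MAPPING_SKETCH = {"blocks.*.attn.q": "encoder.layers.*.self_attn.linear_q"}
def rename(name):
    for src, dst in MAPPING_SKETCH.items():
        prefix, suffix = src.split("*")
        if name.startswith(prefix) and name.endswith(suffix):
            layer = name[len(prefix) : -len(suffix)]
            return dst.replace("*", layer)
    return name
print(rename("blocks.3.attn.q"))  # encoder.layers.3.self_attn.linear_q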
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__UpperCAmelCase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def _snake_case ( A , A=None ) -> Optional[Any]:
require_version(deps[pkg] , A ) | 90 | 1 |
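# `require_version_core` above checks an installed distribution against a
# pinned spec such as "tqdm>=4.27". A minimal sketch of that comparison; the
# helper name is illustrative, and it assumes `packaging` is installed and
# Python >= 3.8 for importlib.metadata:
from importlib.metadata import version
from packaging.requirements import Requirement
def check_requirement(spec):
    req = Requirement(spec)
    installed = version(req.name)  # raises PackageNotFoundError if absent
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{req.name}=={installed} does not satisfy {spec!r}")
check_requirement("packaging>=16.0")  # no-op when the pin is satisfied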
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
__UpperCAmelCase = {
'''google/rembert''': 256,
}
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_="[CLS]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = remove_space
lowerCAmelCase__ = keep_accents
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = spm.SentencePieceProcessor()
self.sp_model.Load(lowerCamelCase_ )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return len(self.sp_model )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self , lowerCamelCase_ ) -> str:
lowerCAmelCase__ = d
lowerCAmelCase__ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False ) -> int:
lowerCAmelCase__ = self.sp_model.EncodeAsPieces(lowerCamelCase_ )
return pieces
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Any:
return self.sp_model.PieceToId(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
return self.sp_model.IdToPiece(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = self.sp_model.decode_pieces(lowerCamelCase_ )
return out_string
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
lowerCAmelCase__ = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,) | 90 |
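# build_inputs_with_special_tokens above emits the BERT-style layout
# [CLS] A [SEP] (B [SEP]). The composition with toy ids, where 101/102 stand
# in for the real cls/sep ids from the SentencePiece vocab:
CLS_ID, SEP_ID = 101, 102
def with_special_tokens(a, b=None):
    return [CLS_ID] + a + [SEP_ID] + (b + [SEP_ID] if b else [])
print(with_special_tokens([7, 8]))       # [101, 7, 8, 102]
print(with_special_tokens([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]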
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]:
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _snake_case ( A , A ) -> List[str]:
for i in range(config.num_hidden_layers ):
lowerCAmelCase__ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _snake_case ( A ) -> List[str]:
lowerCAmelCase__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
def _snake_case ( A , A , A ) -> str:
lowerCAmelCase__ = dct.pop(A )
lowerCAmelCase__ = val
@torch.no_grad()
def _snake_case ( A , A ) -> Any:
lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
if mlm_model:
lowerCAmelCase__ = torch.Size([1, 11, 30522] )
lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ = torch.Size([1, 3129] )
lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowerCAmelCase__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ = torch.Size([1, 2] )
lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 1 |
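# read_in_q_k_v above splits timm's fused qkv projection of shape
# (3 * hidden, hidden) into separate query/key/value matrices by row slices.
# The slicing in miniature, on a small dummy tensor:
import torch
hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
assert q.shape == k.shape == v.shape == (hidden, hidden)
print(q[0], v[-1])  # first query row, last value row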
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def _snake_case ( A , A , A , A ) -> Dict:
lowerCAmelCase__ = sorted(zip(A , A ) , key=lambda x : x[0] / x[1] , reverse=A )
lowerCAmelCase__ , lowerCAmelCase__ = [i[0] for i in r], [i[1] for i in r]
lowerCAmelCase__ = list(accumulate(A ) )
lowerCAmelCase__ = bisect(A , A )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
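# The function above is the greedy fractional knapsack: sort by value/weight
# ratio, take whole items while they fit, then a fraction of the next item.
# A hand-checked example with de-obfuscated names:
def frac_knapsack(values, weights, capacity):
    items = sorted(zip(values, weights), key=lambda it: it[0] / it[1], reverse=True)
    total = 0.0
    for value, weight in items:
        if capacity >= weight:  # the whole item fits
            total += value
            capacity -= weight
        else:  # take the fitting fraction and stop
            total += value * capacity / weight
            break
    return total
print(frac_knapsack([60, 100, 120], [10, 20, 30], 50))  # 240.0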
'''simple docstring'''
import re
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(A , A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 1 |
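# The pattern above allows an optional "+91" (with an optional "-" or space),
# an optional leading "0" or "91", then a 10-digit number starting with
# 7, 8 or 9. Quick checks against valid and invalid inputs:
import re
PAT = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
for phone in ("+918827897895", "9876543210", "+911234567890", "12345"):
    print(phone, bool(PAT.match(phone)))  # True, True, False, False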
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> None:
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) | 90 |
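# The shim above keeps the old class importable while warning users toward
# the replacement. The same pattern in miniature (class names are made up):
import warnings
class NewProcessor:
    def run(self):
        return "ok"
class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
print(OldFeatureExtractor().run())  # "ok", with a FutureWarning emitted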
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__UpperCAmelCase = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
import queue
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = data
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def _snake_case ( ) -> TreeNode:
print('''\n********Press N to stop entering at any point of time********\n''' )
lowerCAmelCase__ = input('''Enter the value of the root node: ''' ).strip().lower()
lowerCAmelCase__ = queue.Queue()
lowerCAmelCase__ = TreeNode(int(A ) )
q.put(A )
while not q.empty():
lowerCAmelCase__ = q.get()
lowerCAmelCase__ = F"""Enter the left node of {node_found.data}: """
lowerCAmelCase__ = input(A ).strip().lower() or '''n'''
if check == "n":
return tree_node
lowerCAmelCase__ = TreeNode(int(A ) )
lowerCAmelCase__ = left_node
q.put(A )
lowerCAmelCase__ = F"""Enter the right node of {node_found.data}: """
lowerCAmelCase__ = input(A ).strip().lower() or '''n'''
if check == "n":
return tree_node
lowerCAmelCase__ = TreeNode(int(A ) )
lowerCAmelCase__ = right_node
q.put(A )
raise
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
lowerCAmelCase__ = queue.Queue()
q.put(A )
while not q.empty():
lowerCAmelCase__ = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
lowerCAmelCase__ = queue.Queue()
q.put(A )
while not q.empty():
lowerCAmelCase__ = []
while not q.empty():
lowerCAmelCase__ = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(A )
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
lowerCAmelCase__ = []
lowerCAmelCase__ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(A )
lowerCAmelCase__ = n.left
# end of while means current node doesn't have left child
lowerCAmelCase__ = stack.pop()
# start to traverse its right child
lowerCAmelCase__ = n.right
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
lowerCAmelCase__ = []
lowerCAmelCase__ = node
while n or stack:
while n:
stack.append(A )
lowerCAmelCase__ = n.left
lowerCAmelCase__ = stack.pop()
print(n.data , end=''',''' )
lowerCAmelCase__ = n.right
def _snake_case ( A ) -> None:
if not isinstance(A , A ) or not node:
return
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = node
stacka.append(A )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCAmelCase__ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(A )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''' )
def _snake_case ( A = "" , A=50 , A="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase__ , lowerCAmelCase__ = divmod(width - len(A ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
__UpperCAmelCase = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt()) | 90 |
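# The demo above builds its tree interactively from stdin. For quick,
# non-interactive testing, the same traversal logic can be exercised on a
# hard-coded tree:
class _Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right
def _inorder(node):
    return _inorder(node.left) + [node.data] + _inorder(node.right) if node else []
print(_inorder(_Node(2, _Node(1), _Node(3))))  # [1, 2, 3]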
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AltDiffusionPipeline
lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 1 |
'''simple docstring'''
__UpperCAmelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__UpperCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__UpperCAmelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
} | 90 |
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
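# Worked check (sketch): folding lcm over 1..10 gives the running values
# 1, 2, 6, 12, 60, 60, 420, 840, 2520, 2520, so solution(10) == 2520, the
# classic warm-up value for this Project Euler problem.
assert solution(10) == 2520
assert greatest_common_divisor(12, 18) == 6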
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class a__ ( nn.Module ):
'''simple docstring'''
lowercase__ : int
lowercase__ : jnp.dtype = jnp.floataa
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_states.shape
lowerCAmelCase__ = jax.image.resize(
lowerCamelCase_ , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
lowerCAmelCase__ = self.conv(lowerCamelCase_ )
return hidden_states
class a__ ( nn.Module ):
'''simple docstring'''
lowercase__ : int
lowercase__ : jnp.dtype = jnp.floataa
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCamelCase_ ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
lowerCAmelCase__ = self.conv(lowerCamelCase_ )
return hidden_states
class a__ ( nn.Module ):
'''simple docstring'''
lowercase__ : int
lowercase__ : int = None
lowercase__ : float = 0.0
lowercase__ : bool = None
lowercase__ : jnp.dtype = jnp.floataa
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.in_channels if self.out_channels is None else self.out_channels
lowerCAmelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCAmelCase__ = nn.Conv(
lowerCamelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase__ = nn.Dense(lowerCamelCase_ , dtype=self.dtype )
lowerCAmelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCAmelCase__ = nn.Dropout(self.dropout_prob )
lowerCAmelCase__ = nn.Conv(
lowerCamelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase__ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCAmelCase__ = None
if use_nin_shortcut:
lowerCAmelCase__ = nn.Conv(
lowerCamelCase_ , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True ) -> Any:
lowerCAmelCase__ = hidden_states
lowerCAmelCase__ = self.norma(lowerCamelCase_ )
lowerCAmelCase__ = nn.swish(lowerCamelCase_ )
lowerCAmelCase__ = self.conva(lowerCamelCase_ )
lowerCAmelCase__ = self.time_emb_proj(nn.swish(lowerCamelCase_ ) )
lowerCAmelCase__ = jnp.expand_dims(jnp.expand_dims(lowerCamelCase_ , 1 ) , 1 )
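        # temb arrives as (batch, out_channels); the two expand_dims calls
        # above broadcast it to (batch, 1, 1, out_channels) so the addition
        # below applies a per-channel shift across the NHWC feature map.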
lowerCAmelCase__ = hidden_states + temb
lowerCAmelCase__ = self.norma(lowerCamelCase_ )
lowerCAmelCase__ = nn.swish(lowerCamelCase_ )
lowerCAmelCase__ = self.dropout(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = self.conva(lowerCamelCase_ )
if self.conv_shortcut is not None:
lowerCAmelCase__ = self.conv_shortcut(lowerCamelCase_ )
return hidden_states + residual | 90 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
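# Usage sketch (the prompt string here is hypothetical): a call such as
# _ask_options("What mixed precision?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision)
# renders a bullet menu and maps the selected index through the converter above.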
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    '''simple docstring'''
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage | 90 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AutoencoderKL
lowercase__ : Any = "sample"
lowercase__ : Optional[int] = 1e-2
@property
    def dummy_input(self):
lowerCAmelCase__ = 4
lowerCAmelCase__ = 3
lowerCAmelCase__ = (32, 32)
lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return (3, 32, 32)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# enable deterministic behavior for gradient checkpointing
lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
lowerCAmelCase__ = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowerCAmelCase__ = torch.randn_like(lowerCamelCase_ )
lowerCAmelCase__ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowerCAmelCase__ = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowerCAmelCase__ = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowerCAmelCase__ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowerCAmelCase__ = dict(model.named_parameters() )
lowerCAmelCase__ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowerCamelCase_ )
lowerCAmelCase__ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
lowerCAmelCase__ = torch.manual_seed(0 )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
lowerCAmelCase__ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase__ = image.to(lowerCamelCase_ )
with torch.no_grad():
lowerCAmelCase__ = model(lowerCamelCase_ , sample_posterior=lowerCamelCase_ , generator=lowerCamelCase_ ).sample
lowerCAmelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowerCAmelCase__ = torch.tensor(
[
-4.00_78e-01,
-3.83_23e-04,
-1.26_81e-01,
-1.14_62e-01,
2.00_95e-01,
1.08_93e-01,
-8.82_47e-02,
-3.03_61e-01,
-9.86_44e-03,
] )
elif torch_device == "cpu":
lowerCAmelCase__ = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowerCAmelCase__ = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1e-2 ) )
@slow
class a__ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = self.get_sd_vae_model()
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ )
lowerCAmelCase__ = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
lowerCAmelCase__ = model(lowerCamelCase_ , generator=lowerCamelCase_ , sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ , fpaa=lowerCamelCase_ )
lowerCAmelCase__ = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
lowerCAmelCase__ = model(lowerCamelCase_ , generator=lowerCamelCase_ , sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = self.get_sd_vae_model()
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
lowerCAmelCase__ = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = self.get_sd_vae_model()
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCAmelCase__ = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().cpu()
lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase_ )
with torch.no_grad():
lowerCAmelCase__ = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase_ )
with torch.no_grad():
lowerCAmelCase__ = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCAmelCase__ = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[int]:
lowerCAmelCase__ = self.get_sd_vae_model()
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCAmelCase__ = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCAmelCase__ = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = self.get_sd_vae_model()
lowerCAmelCase__ = self.get_sd_image(lowerCamelCase_ )
lowerCAmelCase__ = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
lowerCAmelCase__ = model.encode(lowerCamelCase_ ).latent_dist
lowerCAmelCase__ = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowerCAmelCase__ = sample[0, -1, -3:, -3:].flatten().cpu()
lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
lowerCAmelCase__ = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=lowerCamelCase_ ) | 90 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=3, sample_size=32, num_vq_embeddings=256, norm_num_groups=32, vq_embed_dim=None, scaling_factor=0.18215, norm_type="group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )
@apply_forward_hook
    def encode(self, x, return_dict=True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)
@apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
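    # Round-trip note (sketch): `forward` below simply chains `encode` and
    # `decode`; quantization happens inside `decode` unless force_not_quantize
    # is set, so the latents returned by `encode` are the pre-quantization
    # activations h.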
    def forward(self, sample, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec) | 90 | 1 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
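# Example mapping (sketch): "visual_encoder.blocks.0.attn.qkv.weight" passes
# through the substitutions above and comes out as
# "vision_model.encoder.layers.0.self_attn.qkv.weight".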
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__UpperCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path) | 90 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
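# Box arithmetic example (sketch): for row = 4, column = 7 the expressions
# row - row % 3 == 3 and column - column % 3 == 6 select the 3x3 box whose
# top-left corner is (3, 6), i.e. rows 3..5 and columns 6..8.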
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader, test_dataloader
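# Note on the split design (sketch): each fold reuses the official GLUE
# validation split as its held-out "test" set, while "train" and "validation"
# come from the StratifiedKFold index arrays over the original training data.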
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
# Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_fold_dataloaders(
A , A , A , A , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=100 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
A , A , A , A , A )
# Now we train the model
for epoch in range(A ):
model.train()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase__ = model(**A )
lowerCAmelCase__ = outputs.loss
lowerCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ = model(**A )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
lowerCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , A )
# New Code #
# We also run predictions on the test set at the very end
lowerCAmelCase__ = []
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ = model(**A )
lowerCAmelCase__ = outputs.logits
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(A , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowerCAmelCase__ = torch.cat(A , dim=0 )
lowerCAmelCase__ = torch.stack(A , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
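    # Soft-voting ensemble (sketch of what the line above computes): the
    # per-fold logits are stacked to (num_folds, num_test_examples, num_labels),
    # averaged over the fold axis, and argmax'd into the final predictions
    # scored below.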
lowerCAmelCase__ = metric.compute(predictions=A , references=A )
accelerator.print('''Average test metrics from all folds:''' , A )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 90 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
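# Worked example (sketch): the first Fibonacci number with 3 digits is
# F(12) = 144, so fibonacci_digits_index(3) == 12; the default below asks for
# the first term with 1000 digits, as in Project Euler 25.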
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 90 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
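# Sanity check (sketch): k is 1-indexed, so the 3rd smallest element of
# [7, 1, 4, 9, 2] is 4 for any pivot choice. Expected cost is linear, the
# worst case quadratic, and duplicates of the pivot are silently dropped.
assert kth_number([7, 1, 4, 9, 2], 3) == 4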
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = '''MobileNetV1Config'''
# Base docstring
__UpperCAmelCase = '''google/mobilenet_v1_1.0_224'''
__UpperCAmelCase = [1, 1_024, 7, 7]
# Image classification docstring
__UpperCAmelCase = '''google/mobilenet_v1_1.0_224'''
__UpperCAmelCase = '''tabby, tabby cat'''
__UpperCAmelCase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    # Checkpoint key names below follow the standard TF-Slim MobileNetV1 layout.
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = 1 , lowerCamelCase_ = False , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> None:
super().__init__()
lowerCAmelCase__ = config
if in_channels % groups != 0:
raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
lowerCAmelCase__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowerCAmelCase__ = nn.Convad(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=lowerCamelCase_ , groups=lowerCamelCase_ , bias=lowerCamelCase_ , padding_mode='''zeros''' , )
if use_normalization:
lowerCAmelCase__ = nn.BatchNormad(
num_features=lowerCamelCase_ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=lowerCamelCase_ , track_running_stats=lowerCamelCase_ , )
else:
lowerCAmelCase__ = None
if use_activation:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCamelCase_ ):
lowerCAmelCase__ = ACTaFN[config.hidden_act]
else:
lowerCAmelCase__ = config.hidden_act
else:
lowerCAmelCase__ = None
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> torch.Tensor:
if self.config.tf_padding:
lowerCAmelCase__ = apply_tf_padding(lowerCamelCase_ , self.convolution )
lowerCAmelCase__ = self.convolution(lowerCamelCase_ )
if self.normalization is not None:
lowerCAmelCase__ = self.normalization(lowerCamelCase_ )
if self.activation is not None:
lowerCAmelCase__ = self.activation(lowerCamelCase_ )
return features
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : int = MobileNetVaConfig
lowercase__ : Optional[Any] = load_tf_weights_in_mobilenet_va
lowercase__ : str = "mobilenet_v1"
lowercase__ : Dict = "pixel_values"
lowercase__ : Union[str, Any] = False
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
if isinstance(lowerCamelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCamelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__UpperCAmelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__UpperCAmelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , a__ , )
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Dict:
super().__init__(lowerCamelCase_ )
lowerCAmelCase__ = config
lowerCAmelCase__ = 32
lowerCAmelCase__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowerCAmelCase__ = MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=config.num_channels , out_channels=lowerCamelCase_ , kernel_size=3 , stride=2 , )
lowerCAmelCase__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCAmelCase__ = nn.ModuleList()
for i in range(13 ):
lowerCAmelCase__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCAmelCase__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=3 , stride=strides[i] , groups=lowerCamelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=1 , ) )
lowerCAmelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Any:
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
lowerCAmelCase__ = self.conv_stem(lowerCamelCase_ )
lowerCAmelCase__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowerCAmelCase__ = layer_module(lowerCamelCase_ )
if output_hidden_states:
lowerCAmelCase__ = all_hidden_states + (hidden_states,)
lowerCAmelCase__ = hidden_states
if self.pooler is not None:
lowerCAmelCase__ = torch.flatten(self.pooler(lowerCamelCase_ ) , start_dim=1 )
else:
lowerCAmelCase__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=lowerCamelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a__ , )
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> None:
super().__init__(lowerCamelCase_ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = MobileNetVaModel(lowerCamelCase_ )
lowerCAmelCase__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCAmelCase__ = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCamelCase_ )
lowerCAmelCase__ = nn.Linear(lowerCamelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.mobilenet_va(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier(self.dropout(lowerCamelCase_ ) )
lowerCAmelCase__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase__ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase__ = '''single_label_classification'''
else:
lowerCAmelCase__ = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCAmelCase__ = MSELoss()
if self.num_labels == 1:
lowerCAmelCase__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase__ = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase__ = CrossEntropyLoss()
lowerCAmelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase__ = BCEWithLogitsLoss()
lowerCAmelCase__ = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states , ) | 90 |
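A quick aside on the padding logic in the model code above: the `apply_tf_padding` helper reproduces TensorFlow's "SAME" rule, where the total pad depends on whether the input size divides the stride, and any odd remainder goes to the trailing edge. A minimal standalone sketch of that rule (the names here are illustrative, not from the file):

def same_pad(in_size: int, stride: int, kernel: int) -> tuple[int, int]:
    # total padding needed so the output size equals ceil(in_size / stride)
    if in_size % stride == 0:
        total = max(kernel - stride, 0)
    else:
        total = max(kernel - (in_size % stride), 0)
    # TensorFlow puts the extra pixel (when total is odd) on the trailing edge
    return total // 2, total - total // 2

print(same_pad(224, 2, 3))  # (0, 1): one extra pixel on the trailing edge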
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 1 |
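The init above routes everything through `_LazyModule`, so importing the package stays cheap and heavy submodules load on first use. A minimal sketch of that pattern (not the actual `_LazyModule` implementation; `json` stands in for a heavy optional dependency):

import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):  # PEP 562 module-level hook, called only for missing attributes
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")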
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
__UpperCAmelCase = '''src/transformers'''
# Matches is_xxx_available()
__UpperCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__UpperCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__UpperCAmelCase = re.compile(R'''^\s*try:''')
# Catches a line with else:
__UpperCAmelCase = re.compile(R'''^\s*else:''')
def _snake_case ( A ) -> List[str]:
if _re_test_backend.search(A ) is None:
return None
lowerCAmelCase__ = [b[0] for b in _re_backend.findall(A )]
backends.sort()
return "_and_".join(A )
def _snake_case ( A ) -> Optional[int]:
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = 0
while line_index < len(A ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A ):
lowerCAmelCase__ = _re_one_line_import_struct.search(A ).groups()[0]
            lowerCAmelCase__ = re.findall(R'''\[([^\]]+)\]''' , A )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowerCAmelCase__ = _re_import_struct_key_value.search(A )
if single_line_import_search is not None:
lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A ) > 0]
objects.extend(A )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowerCAmelCase__ = lines[line_index]
if _re_import_struct_add_one.search(A ) is not None:
objects.append(_re_import_struct_add_one.search(A ).groups()[0] )
elif _re_import_struct_add_many.search(A ) is not None:
lowerCAmelCase__ = _re_import_struct_add_many.search(A ).groups()[0].split(''', ''' )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(A ) > 0]
objects.extend(A )
elif _re_between_brackets.search(A ) is not None:
lowerCAmelCase__ = _re_between_brackets.search(A ).groups()[0].split(''', ''' )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(A ) > 0]
objects.extend(A )
elif _re_quote_object.search(A ) is not None:
objects.append(_re_quote_object.search(A ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ = []
while (
line_index < len(A )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(A ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _snake_case ( A , A ) -> str:
def find_duplicates(A ):
return [k for k, v in collections.Counter(A ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def _snake_case ( ) -> int:
lowerCAmelCase__ = []
for root, _, files in os.walk(A ):
if "__init__.py" in files:
lowerCAmelCase__ = os.path.join(A , '''__init__.py''' )
lowerCAmelCase__ = parse_init(A )
if objects is not None:
lowerCAmelCase__ = analyze_results(*A )
if len(A ) > 0:
lowerCAmelCase__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(A ) )
if len(A ) > 0:
raise ValueError('''\n\n'''.join(A ) )
def _snake_case ( ) -> str:
lowerCAmelCase__ = []
for path, directories, files in os.walk(A ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(A )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(A ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowerCAmelCase__ = str((Path(A ) / folder).relative_to(A ) )
lowerCAmelCase__ = short_path.replace(os.path.sep , '''.''' )
submodules.append(A )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ = str((Path(A ) / fname).relative_to(A ) )
lowerCAmelCase__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(A )
return submodules
__UpperCAmelCase = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _snake_case ( ) -> str:
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(A , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(A ) > 0:
lowerCAmelCase__ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules() | 90 |
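To see what `find_backend` extracts, here is a self-contained run against a hypothetical init line; the regexes mirror the ones defined above, with the trailing parentheses escaped so `findall` returns plain strings instead of tuples:

import re

_re_test_backend = re.compile(r"^\s*if\s+not\s+is_[a-z_]*_available\(\)")
_re_backend = re.compile(r"is_([a-z_]*)_available\(\)")

def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    return "_and_".join(sorted(_re_backend.findall(line)))

print(find_backend("    if not is_torch_available() and not is_vision_available():"))
# torch_and_vision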
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')
@dataclass(frozen=a__ , slots=a__ )
class a__ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowercase__ : KEY
lowercase__ : VAL
class a__ ( _Item ):
'''simple docstring'''
def __init__( self ) -> None:
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __bool__( self ) -> bool:
return False
__UpperCAmelCase = _DeletedItem()
class a__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None:
lowerCAmelCase__ = initial_block_size
lowerCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ = capacity_factor
lowerCAmelCase__ = 0
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return hash(lowerCamelCase_ ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
lowerCAmelCase__ = self._buckets[ind]
if not stored:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self._buckets
lowerCAmelCase__ = [None] * new_size
lowerCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]:
lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
break
def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_ , lowerCamelCase_ )
def __delitem__( self , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , lowerCamelCase_ ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
lowerCAmelCase__ = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})""" | 90 | 1 |
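The map resolves collisions by linear probing: the bucket-iteration helper starts at `hash(key) % len(buckets)` and steps one slot forward with wrap-around, visiting each bucket at most once. A self-contained illustration of that probe sequence:

def probe_indices(key, size):
    ind = hash(key) % size
    for _ in range(size):
        yield ind
        ind = (ind + 1) % size

# CPython hashes small ints to themselves, so this sequence starts at slot 2
print(list(probe_indices(10, 8)))  # [2, 3, 4, 5, 6, 7, 0, 1]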
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _snake_case ( ) -> Tuple:
lowerCAmelCase__ = HfArgumentParser(A )
lowerCAmelCase__ = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase__ = TensorFlowBenchmark(args=A )
try:
lowerCAmelCase__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCAmelCase__ = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowerCAmelCase__ = ''' '''.join(str(A ).split(''' ''' )[:-1] )
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = eval(str(A ).split(''' ''' )[-1] )
lowerCAmelCase__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(A )
if len(A ) > 0:
lowerCAmelCase__ = full_error_msg + begin_error_msg + str(A )
raise ValueError(A )
benchmark.run()
if __name__ == "__main__":
main() | 90 |
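The ValueError handling above rewrites deprecated `--no_xxx` flags into their `--no-xxx` spellings before re-raising. A plain restatement of that remapping, with a made-up deprecated list for illustration:

deprecated = {"no_inference", "no_cuda", "no_tpu"}

def remap(arg):
    name = arg[2:]                 # strip the leading "--"
    if name in deprecated:
        return "--no-" + name[3:]  # drop the "no_" prefix, re-add it as "no-"
    raise ValueError(f"unknown arg: {arg}")

print(remap("--no_cuda"))  # --no-cuda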
'''simple docstring'''
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = OmegaConf.load(A )
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model''']
lowerCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''first_stage_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''model.diffusion_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
lowerCAmelCase__ = config.model.params.first_stage_config.params
lowerCAmelCase__ = config.model.params.unet_config.params
lowerCAmelCase__ = VQModel(**A ).eval()
vqvae.load_state_dict(A )
lowerCAmelCase__ = UNetLDMModel(**A ).eval()
unet.load_state_dict(A )
lowerCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , )
lowerCAmelCase__ = LDMPipeline(A , A , A )
pipeline.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
__UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 1 |
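The conversion above splits one checkpoint into two models purely by key prefix: `first_stage_model.` keys feed the VQModel and `model.diffusion_model.` keys feed the UNet. A generic restatement of that split, assuming the prefixes are stripped before loading (toy values stand in for tensors):

def take_prefixed(state_dict, prefix):
    return {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}

state_dict = {"first_stage_model.encoder.w": 1, "model.diffusion_model.in.w": 2}
print(take_prefixed(state_dict, "first_stage_model."))  # {'encoder.w': 1}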
'''simple docstring'''
from __future__ import annotations
import requests
__UpperCAmelCase = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def _snake_case ( A , A = 1 , A = "new" , A = None ) -> dict:
lowerCAmelCase__ = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(A ) - valid_terms ) ):
lowerCAmelCase__ = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(A )
lowerCAmelCase__ = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
if response.status_code == 429:
raise requests.HTTPError
lowerCAmelCase__ = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(A )}
lowerCAmelCase__ = {}
for id_ in range(A ):
lowerCAmelCase__ = {
item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext'''])) | 90 |
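For reference, the underlying request looks like this when issued directly; it hits the live Reddit API, the explicit User-agent avoids the throttling applied to the default agent, and a 429 status still means "rate limited, retry later":

import requests

response = requests.get(
    "https://reddit.com/r/learnpython/new.json?limit=1",
    headers={"User-agent": "A random string"},
    timeout=10,
)
if response.status_code == 429:
    raise requests.HTTPError("rate limited")
print(response.json()["data"]["children"][0]["data"]["title"])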
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = v.to_dict()
return d | 90 | 1 |
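The `to_dict` override above exists because one field can hold a GenerationConfig, which must itself be converted before the arguments serialize cleanly. A standalone sketch of the same pattern, with a plain dataclass standing in for GenerationConfig and a hasattr check standing in for the isinstance test:

import dataclasses

@dataclasses.dataclass
class Inner:  # stands in for GenerationConfig
    max_length: int = 20

    def to_dict(self):
        return {"max_length": self.max_length}

@dataclasses.dataclass
class Outer:
    generation_config: Inner = dataclasses.field(default_factory=Inner)

    def to_dict(self):
        d = dict(vars(self))
        for k, v in d.items():
            if hasattr(v, "to_dict"):
                d[k] = v.to_dict()
        return d

print(Outer().to_dict())  # {'generation_config': {'max_length': 20}}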
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__UpperCAmelCase = logging.getLogger(__name__)
__UpperCAmelCase = '''pytorch_model.bin'''
@dataclasses.dataclass
class a__ :
'''simple docstring'''
lowercase__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
lowercase__ : Optional[str] = dataclasses.field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class a__ :
'''simple docstring'''
lowercase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
lowercase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
lowercase__ : Optional[str] = dataclasses.field(
default=a__ , metadata={"help": "A csv or a json file containing the validation data."} )
lowercase__ : Optional[str] = dataclasses.field(
default=a__ , metadata={"help": "The name of the task to train on."} , )
lowercase__ : Optional[List[str]] = dataclasses.field(
default=a__ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class a__ :
'''simple docstring'''
lowercase__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
lowercase__ : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
lowercase__ : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
lowercase__ : Optional[int] = dataclasses.field(
default=1_0 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
lowercase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
lowercase__ : Optional[bool] = dataclasses.field(
default=a__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
lowercase__ : Optional[bool] = dataclasses.field(
default=a__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
lowercase__ : Optional[bool] = dataclasses.field(
default=a__ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
lowercase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    lowercase__ : Optional[int] = dataclasses.field(
        default=1_0_0 , metadata={"help": "Maximum number of self-training iterations."} , )
lowercase__ : Optional[int] = dataclasses.field(
default=a__ , metadata={"help": "Random seed for initialization."} , )
def _snake_case ( A , A , A , A , A , A ) -> Tuple:
lowerCAmelCase__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
lowerCAmelCase__ = dataset.filter(lambda A : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
        assert 0.0 <= eval_result <= 1.0
lowerCAmelCase__ = int(eval_result * len(A ) )
print(A )
lowerCAmelCase__ = dataset.sort('''probability''' , reverse=A )
lowerCAmelCase__ = dataset.select(range(A ) )
lowerCAmelCase__ = dataset.remove_columns(['''label''', '''probability'''] )
lowerCAmelCase__ = dataset.rename_column('''prediction''' , '''label''' )
lowerCAmelCase__ = dataset.map(lambda A : {"label": idalabel[example["label"]]} )
lowerCAmelCase__ = dataset.shuffle(seed=args.seed )
lowerCAmelCase__ = os.path.join(A , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(A , index=A )
else:
dataset.to_json(A )
def _snake_case ( A , A , A , A , **A ) -> Dict:
lowerCAmelCase__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowerCAmelCase__ = STModelArguments(model_name_or_path=A )
lowerCAmelCase__ = STDataArguments(train_file=A , infer_file=A )
lowerCAmelCase__ = STTrainingArguments(output_dir=A )
lowerCAmelCase__ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A ).items():
setattr(A , A , A )
for key, value in kwargs.items():
if hasattr(A , A ):
setattr(A , A , A )
# Sanity checks
lowerCAmelCase__ = {}
lowerCAmelCase__ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowerCAmelCase__ = args.train_file
lowerCAmelCase__ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowerCAmelCase__ = args.eval_file
for key in data_files:
lowerCAmelCase__ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
lowerCAmelCase__ = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
lowerCAmelCase__ = F"""{args.output_dir}/self-train_iter-{{}}""".format
lowerCAmelCase__ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A )
os.makedirs(A , exist_ok=A )
accelerator.wait_for_everyone()
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
# Show the progress bar
lowerCAmelCase__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowerCAmelCase__ = data_dir_format(A )
assert os.path.exists(A )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowerCAmelCase__ = os.path.join(A , '''stage-1''' )
lowerCAmelCase__ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A , A ):
arguments_dict.update({key: value} )
lowerCAmelCase__ = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , A )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowerCAmelCase__ = os.path.join(A , '''best-checkpoint''' )
lowerCAmelCase__ = os.path.join(A , '''stage-2''' )
# Update arguments_dict
lowerCAmelCase__ = model_path
lowerCAmelCase__ = data_files['''train''']
lowerCAmelCase__ = current_output_dir
lowerCAmelCase__ = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , A )
lowerCAmelCase__ = iteration
lowerCAmelCase__ = data_dir_format(iteration + 1 )
lowerCAmelCase__ = AutoConfig.from_pretrained(os.path.join(A , '''best-checkpoint''' ) )
lowerCAmelCase__ = config.idalabel
lowerCAmelCase__ = os.path.join(A , '''eval_results_best-checkpoint.json''' )
lowerCAmelCase__ = os.path.join(A , '''test_results_best-checkpoint.json''' )
assert os.path.exists(A )
with open(A , '''r''' ) as f:
lowerCAmelCase__ = float(json.load(A )[args.eval_metric] )
lowerCAmelCase__ = os.path.join(A , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(A )
# Loading the dataset from local csv or json files.
lowerCAmelCase__ = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
lowerCAmelCase__ = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(A , exist_ok=A )
shutil.copy(A , os.path.join(A , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(A ):
shutil.copy(A , os.path.join(A , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(A , A , A , A , A , A )
accelerator.wait_for_everyone()
lowerCAmelCase__ = os.path.join(A , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowerCAmelCase__ = eval_result
if best_iteration is None:
lowerCAmelCase__ = new_iteration
lowerCAmelCase__ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowerCAmelCase__ = new_iteration
lowerCAmelCase__ = new_eval_result
lowerCAmelCase__ = 0
else:
if new_eval_result == best_eval_result:
lowerCAmelCase__ = new_iteration
lowerCAmelCase__ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowerCAmelCase__ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , A )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , ) | 90 |
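The pseudo-label step above filters either by a fixed confidence threshold or by keeping the top fraction of rows equal to the current validation score. The same two filters on plain dicts, with made-up numbers:

rows = [
    {"prediction": 1, "probability": 0.95},
    {"prediction": 0, "probability": 0.70},
    {"prediction": 1, "probability": 0.40},
]

by_confidence = [r for r in rows if r["probability"] > 0.6]

eval_result = 0.66  # pretend validation accuracy in [0, 1]
keep = int(eval_result * len(rows))
by_val = sorted(rows, key=lambda r: r["probability"], reverse=True)[:keep]

print(len(by_confidence), len(by_val))  # 2 1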
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 90 | 1 |
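The first test above is a save/reload round trip: serializing a pipeline and loading it back must leave the forward pass numerically identical for the same seed and inputs. The generic form of that check, on a plain torch module:

import tempfile

import torch
from torch import nn

model = nn.Linear(4, 2)
x = torch.randn(1, 4)

with tempfile.TemporaryDirectory() as tmpdir:
    path = f"{tmpdir}/weights.pt"
    torch.save(model.state_dict(), path)
    reloaded = nn.Linear(4, 2)
    reloaded.load_state_dict(torch.load(path))
    assert torch.allclose(model(x), reloaded(x)), "Models don't have the same forward pass"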
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _snake_case ( A , A , A , A , A ) -> np.array:
lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(A ):
lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] )
lowerCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
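The solver above is Heun's method (the improved Euler scheme): an explicit Euler predictor followed by a trapezoidal corrector that averages the slopes at both ends of the step. A self-contained restatement of one update, checked against dy/dx = y:

import math

def heun_step(ode_func, x, y, h):
    y_pred = y + h * ode_func(x, y)                                  # Euler predictor
    return y + (h / 2) * (ode_func(x, y) + ode_func(x + h, y_pred))  # trapezoidal corrector

# y(0) = 1 with dy/dx = y, so y(1) should approach e
x, y, h = 0.0, 1.0, 0.01
for _ in range(100):
    y = heun_step(lambda x_, y_: y_, x, y, h)
    x += h
print(y, math.e)  # ~2.71823 vs 2.71828...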
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = str(A )
return len(A ) == 9 and set(A ) == set('''123456789''' )
def _snake_case ( ) -> int | None:
for base_num in range(9999 , 4999 , -1 ):
lowerCAmelCase__ = 100002 * base_num
if is_9_pandigital(A ):
return candidate
for base_num in range(333 , 99 , -1 ):
lowerCAmelCase__ = 1002003 * base_num
if is_9_pandigital(A ):
return candidate
return None
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
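The two magic multipliers come from concatenated products: for a four-digit n, str(n) + str(2n) equals 100002 * n because 2n has five digits, and for a three-digit n, str(n) + str(2n) + str(3n) equals 1002003 * n while 2n and 3n keep three digits. A quick check:

def concatenated_product(n, k):
    return "".join(str(n * i) for i in range(1, k + 1))

print(concatenated_product(9327, 2), 100002 * 9327)  # 932718654 932718654
print(concatenated_product(192, 3), 1002003 * 192)   # 192384576 192384576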
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : List[str]
lowercase__ : Optional[str] = None
# Automatically constructed
lowercase__ : ClassVar[str] = "dict"
lowercase__ : ClassVar[Any] = None
lowercase__ : str = field(default="Translation" , init=a__ , repr=a__ )
def __call__( self ) -> Any:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __SCREAMING_SNAKE_CASE ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : Optional[List] = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[str] = None
# Automatically constructed
lowercase__ : ClassVar[str] = "dict"
lowercase__ : ClassVar[Any] = None
lowercase__ : str = field(default="TranslationVariableLanguages" , init=a__ , repr=a__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = sorted(set(self.languages ) ) if self.languages else None
lowerCAmelCase__ = len(self.languages ) if self.languages else None
def __call__( self ) -> List[str]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
lowerCAmelCase__ = set(self.languages )
if self.languages and set(lowerCamelCase_ ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCAmelCase__ = []
for lang, text in translation_dict.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCAmelCase__ , lowerCAmelCase__ = zip(*sorted(lowerCamelCase_ ) )
return {"language": languages, "translation": translations}
def __SCREAMING_SNAKE_CASE ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
} | 90 |
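The encode step above accepts either one string or a list of strings per language, flattening everything to (language, text) pairs sorted by language code. Its core logic, restated on plain dicts:

translation_dict = {"en": "the cat", "fr": "le chat", "de": ["die Katze"]}

pairs = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        pairs.append((lang, text))
    else:
        pairs.extend((lang, t) for t in text)
languages, translations = zip(*sorted(pairs))
print({"language": languages, "translation": translations})
# {'language': ('de', 'en', 'fr'), 'translation': ('die Katze', 'the cat', 'le chat')}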
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This is in contrast to taking a full-size model and reducing its layers
# and emb dimensions to the minimum, but keeping the full vocab + merges files, which leads
# to ~3MB in total for all files. The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(vocab, range(len(vocab))))
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = Path(tmpdirname)
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 1 |
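On the `tiny_model.half()` call above: casting to fp16 halves every parameter's storage, which is what keeps the saved checkpoint small. A quick size comparison, with a plain layer standing in for the FSMT model:

from torch import nn

model = nn.Linear(256, 256)
fp32_bytes = sum(p.numel() * p.element_size() for p in model.parameters())
fp16_bytes = sum(p.numel() * p.element_size() for p in model.half().parameters())
print(fp32_bytes, fp16_bytes)  # 263168 131584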
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a__ :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
@require_torch
class a__ ( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase__ = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase__ = len(lowerCamelCase_ )
self.assertGreater(lowerCamelCase_ , 0 )
self.assertEqual(
lowerCamelCase_ , [
{
'''score''': ANY(lowerCamelCase_ ),
'''label''': ANY(lowerCamelCase_ ),
'''box''': {'''xmin''': ANY(lowerCamelCase_ ), '''ymin''': ANY(lowerCamelCase_ ), '''xmax''': ANY(lowerCamelCase_ ), '''ymax''': ANY(lowerCamelCase_ )},
}
for i in range(lowerCamelCase_ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase__ = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
] , )
lowerCAmelCase__ = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt( self ):
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf( self ):
pass
@require_torch
@slow
    def test_threshold( self ):
        threshold = 0.2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
] , )
@require_torch
@slow
    def test_top_k( self ):
        top_k = 2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
] , ) | 90 |
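A minimal usage sketch of the pipeline exercised by the tests above; the tiny OWL-ViT checkpoint and the output format come from those tests, while the threshold value here is only illustrative.

from transformers import pipeline

detector = pipeline(
    "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
)
predictions = detector(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.1,  # keep only detections scoring above this value
)
for prediction in predictions:
    # each entry: {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(prediction["label"], prediction["score"], prediction["box"])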
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest(unittest.TestCase ):
'''simple docstring'''
    def test_memory_implicit( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
    def test_memory_explicit( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arg1 ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs , arg1 = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
        self.assertListEqual([bs, arg1] , [8, '''hello'''] )
    def test_start_zero( self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_approach_zero( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_verbose_guard( self ):
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(1_28 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
    def test_any_other_error( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('''Oops, we had an error!''' )
        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
    def test_release_memory( self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory ) | 90 | 1 |
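The tests above pin down the contract of find_executable_batch_size: start at starting_batch_size, halve on every CUDA out-of-memory error, and fail once zero is reached. A self-contained sketch of that retry loop, assuming a simple substring match on the error message (the real accelerate implementation also clears caches and recognizes several OOM error variants):

import functools

def find_executable_batch_size_sketch(function=None, starting_batch_size=128):
    # Decorator factory mirroring the halving-retry behaviour tested above.
    if function is None:
        return functools.partial(find_executable_batch_size_sketch, starting_batch_size=starting_batch_size)

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    batch_size //= 2  # halve and retry
                else:
                    raise
    return wrapper

@find_executable_batch_size_sketch(starting_batch_size=128)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # prints 8 after failing at 128, 64, 32 and 16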
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@cached_property
    def big_tokenizer( self ):
        return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , ) | 90 |
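The fairseq_offset arithmetic asserted in the full-tokenizer test exists because fairseq reserves ids 0-3 for <s>, <pad>, </s> and <unk>, so XLM-R shifts every raw SentencePiece id by one. A small sketch of that shift, reusing the ids from the test above (sp_id_to_model_id is an illustrative helper, not a transformers API):

FAIRSEQ_SPECIAL_TOKENS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # XLM-R's constant shift over the raw SentencePiece ids

def sp_id_to_model_id(sp_id: int) -> int:
    # raw SentencePiece id -> id in the model vocabulary
    return sp_id + FAIRSEQ_OFFSET

raw_sp_ids = [285, 46, 10, 170, 382]  # "▁This ▁is ▁a ▁t est" in the fixture vocab
print([sp_id_to_model_id(i) for i in raw_sp_ids])  # [286, 47, 11, 171, 383]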
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCAmelCase = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ) -> Union[str, Any]:
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
    elif weights_location is None:
        raise RuntimeError(
            F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> List[Any]:
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs['''special_dtypes'''] = special_dtypes
        kwargs['''no_split_module_classes'''] = no_split_module_classes
        kwargs['''dtype'''] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )
        kwargs['''max_memory'''] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> Any:
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Optional[Any]:
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ) -> Tuple:
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ) -> Optional[int]:
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ) -> Union[str, Any]:
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ) -> Any:
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) ) | 90 | 1 |
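The heart of the utilities above is _replace_with_bnb_layers: walk named_children, swap every eligible nn.Linear for a quantized module, and recurse into everything else. A self-contained sketch of the same traversal that uses a plain stand-in class instead of a real bitsandbytes layer, so it runs without a GPU:

import torch.nn as nn

class FakeQuantLinear(nn.Linear):
    """Stand-in for bnb.nn.Linear8bitLt / bnb.nn.Linear4bit, for illustration only."""

def replace_linears(model, skip, prefix=""):
    # Mirrors the recursion above: visit children, replace eligible nn.Linear
    # modules in place, then recurse into the remaining submodules.
    for name, child in model.named_children():
        full_name = f"{prefix}{name}"
        if isinstance(child, nn.Linear) and full_name not in skip:
            new_child = FakeQuantLinear(child.in_features, child.out_features, child.bias is not None)
            new_child.load_state_dict(child.state_dict())  # keep the original weights
            setattr(model, name, new_child)
        else:
            replace_linears(child, skip, prefix=f"{full_name}.")
    return model

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
replace_linears(model, skip={"2"})  # keep the head (child "2") unconverted
print(model)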
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> Union[str, Any]:
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ) -> List[str]:
    for i in range(config.num_hidden_layers ):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ) -> List[str]:
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ) -> str:
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ) -> Any:
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: '''False''', 1: '''True'''}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('''Unknown model type''' )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        image2 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_1 = processor(image1 , text , return_tensors='''pt''' )
        encoding_2 = processor(image2 , text , return_tensors='''pt''' )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=True ).raw )
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image , text , return_tensors='''pt''' )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522] )
        expected_slice = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129] )
        expected_slice = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8_721, 2.1_291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 |
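Stripped of the ViLT specifics, the conversion above is plain state-dict surgery: pop each tensor under its original name, re-insert it under the Hugging Face name, and slice the fused qkv projection into separate query/key/value matrices. A toy demonstration on random tensors, using the same key names as the script:

import torch

state_dict = {
    "transformer.norm.weight": torch.ones(8),
    "transformer.blocks.0.attn.qkv.weight": torch.randn(3 * 8, 8),  # fused q/k/v
}

# 1) rename_key: pop under the old name, re-insert under the new one
state_dict["vilt.layernorm.weight"] = state_dict.pop("transformer.norm.weight")

# 2) read_in_q_k_v: slice the fused projection into query/key/value blocks
hidden_size = 8
qkv = state_dict.pop("transformer.blocks.0.attn.qkv.weight")
state_dict["vilt.encoder.layer.0.attention.attention.query.weight"] = qkv[:hidden_size, :]
state_dict["vilt.encoder.layer.0.attention.attention.key.weight"] = qkv[hidden_size : 2 * hidden_size, :]
state_dict["vilt.encoder.layer.0.attention.attention.value.weight"] = qkv[-hidden_size:, :]
print(sorted(state_dict))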
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _snake_case ( ode_func , ya , xa , step_size , x_end ) -> np.array:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor step (explicit Euler)
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        # corrector step (trapezoidal average of the slopes)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y[k + 1] ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
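A quick sanity check of the integrator above on dy/dx = y with y(0) = 1, whose exact solution is e^x; Heun's method is second order, so halving the step size should cut the endpoint error roughly by a factor of four:

import numpy as np

def f(x, y):
    return y  # dy/dx = y, exact solution y = exp(x)

# arguments: ode_func, ya (initial value), xa (start), step_size, x_end
approx = _snake_case(f, 1.0, 0.0, 0.01, 1.0)
print(approx[-1], np.exp(1.0))  # ~2.7182 vs 2.71828...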
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word ) -> str:
    return "".join(sorted(word ) )
def anagram( my_word ) -> list[str]:
    return word_by_signature[signature(my_word )]
__UpperCAmelCase = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
__UpperCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
__UpperCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__UpperCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams)) | 90 |
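The script above hinges on a single idea: all anagrams share the same sorted-letter signature, so a defaultdict keyed by that signature buckets them together. The same idea on an inline word list, without the words.txt fixture:

import collections

words = ["listen", "silent", "enlist", "google", "banana"]
by_signature = collections.defaultdict(list)
for word in words:
    by_signature["".join(sorted(word))].append(word)  # anagrams share one key

anagram_groups = {k: v for k, v in by_signature.items() if len(v) > 1}
print(anagram_groups)  # {'eilnst': ['listen', 'silent', 'enlist']}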
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    def __len__( self ):
        return self.length
    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders( accelerator , batch_size = 16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader | 90 | 1 |
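A short sketch of how the pieces above are typically combined in the accelerate tests: build the synthetic dataset, batch it with a DataLoader, and run the toy model over batched {"x", "y"} dictionaries (the loss computation is illustrative):

from torch.utils.data import DataLoader

dataset = RegressionDataset(length=16, seed=42)  # y ≈ 2*x + 3 plus gaussian noise
model = RegressionModel(a=1, b=1)
for batch in DataLoader(dataset, batch_size=8):
    preds = model(batch["x"])                    # prints dtypes on the first batch
    loss = ((preds - batch["y"]) ** 2).mean()
    print(round(loss.item(), 4))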
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 3_84
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_28
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
lowerCAmelCase__ = True
if hasattr(lowerCamelCase_ , '''use_cache''' ):
lowerCAmelCase__ = True
lowerCAmelCase__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase__ = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , '''saved_model''' , '''1''' )
lowerCAmelCase__ = tf.keras.models.load_model(lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
if self.is_encoder_decoder:
lowerCAmelCase__ = outputs['''encoder_hidden_states''']
lowerCAmelCase__ = outputs['''encoder_attentions''']
else:
lowerCAmelCase__ = outputs['''hidden_states''']
lowerCAmelCase__ = outputs['''attentions''']
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
lowerCAmelCase__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
lowerCAmelCase__ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase__ = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
lowerCAmelCase__ = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
def check_decoder_attentions_output(lowerCamelCase_ ):
lowerCAmelCase__ = len(lowerCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
lowerCAmelCase__ = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase_ ):
lowerCAmelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCAmelCase__ = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
lowerCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ = model(lowerCamelCase_ )[0]
lowerCAmelCase__ = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCamelCase_ )
lowerCAmelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4 ) | 90 |
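A short aside on the shape asserts in the row above: ConvBERT routes part of its attention through a convolution branch, so the attention tensors expose num_attention_heads / head_ratio heads (head_ratio is assumed to default to 2 here). A minimal arithmetic sketch:
num_attention_heads, head_ratio = 4, 2
effective_heads = num_attention_heads // head_ratio  # heads left for plain self-attention
assert effective_heads == 2  # matches the `num_attention_heads / 2` in the asserts above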
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__UpperCAmelCase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def _snake_case ( A , A=None ) -> Optional[Any]:
require_version(deps[pkg] , A ) | 90 | 1 |
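A hedged usage sketch for the version-check helper above; the specifier and hint strings are illustrative assumptions, not taken from the row:
from transformers.utils.versions import require_version

# No-op when the installed version satisfies the specifier; raises otherwise.
require_version("tqdm>=4.27", "Try: pip install -U tqdm")  # hypothetical pin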
'''simple docstring'''
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = n
lowerCAmelCase__ = [None] * self.n
lowerCAmelCase__ = 0 # index of the first element
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def __len__( self ) -> int:
return self.size
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
return self.size == 0
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return False if self.is_empty() else self.array[self.front]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[int]:
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
lowerCAmelCase__ = data
lowerCAmelCase__ = (self.rear + 1) % self.n
self.size += 1
return self
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
if self.size == 0:
raise Exception('''UNDERFLOW''' )
lowerCAmelCase__ = self.array[self.front]
lowerCAmelCase__ = None
lowerCAmelCase__ = (self.front + 1) % self.n
self.size -= 1
return temp | 90 |
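Since the renaming above collapses every method into `__SCREAMING_SNAKE_CASE`, here is a readable restatement of the same ring buffer with assumed conventional names, plus a quick self-test:
class CircularQueue:
    """Minimal readable restatement of the fixed-size queue above (assumed names)."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * n
        self.front = self.rear = self.size = 0

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self  # returning self lets calls chain

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp, self.array[self.front] = self.array[self.front], None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp

q = CircularQueue(3)
q.enqueue(1).enqueue(2)
assert q.dequeue() == 1 and q.dequeue() == 2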
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]:
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
return rename_keys
def _snake_case ( A , A ) -> List[str]:
for i in range(config.num_hidden_layers ):
lowerCAmelCase__ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _snake_case ( A ) -> List[str]:
lowerCAmelCase__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
def _snake_case ( A , A , A ) -> str:
lowerCAmelCase__ = dct.pop(A )
lowerCAmelCase__ = val
@torch.no_grad()
def _snake_case ( A , A ) -> Any:
lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
if mlm_model:
lowerCAmelCase__ = torch.Size([1, 11, 30522] )
lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ = torch.Size([1, 3129] )
lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
# verify vqa prediction equals "2"
lowerCAmelCase__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ = torch.Size([1, 2] )
lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 1 |
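The conversion above leans on a pop-and-reinsert pattern to move weights between key names; a standalone sketch of that mechanic (not the row's exact function):
def rename_key(dct, old, new):
    # Move a value to a new key, dropping the old one.
    dct[new] = dct.pop(old)

sd = {"transformer.norm.weight": 1}
rename_key(sd, "transformer.norm.weight", "vilt.layernorm.weight")
assert sd == {"vilt.layernorm.weight": 1}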
'''simple docstring'''
import os
import string
import sys
__UpperCAmelCase = 1 << 8
__UpperCAmelCase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
__UpperCAmelCase = []
__UpperCAmelCase = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
__UpperCAmelCase = ord(str(i))
def _snake_case ( ) -> List[Any]:
if os.name == "nt":
import msvcrt
lowerCAmelCase__ = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(A ) == 0:
# Read the keystroke
lowerCAmelCase__ = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase__ = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase__ = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(A )
if ord(A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase__ = chr(KEYMAP['''esc'''] )
except KeyError:
lowerCAmelCase__ = cha[1]
else:
lowerCAmelCase__ = ch.decode(A )
else:
lowerCAmelCase__ = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase__ = sys.stdin.fileno()
lowerCAmelCase__ = termios.tcgetattr(A )
try:
tty.setraw(A )
lowerCAmelCase__ = sys.stdin.read(1 )
finally:
termios.tcsetattr(A , termios.TCSADRAIN , A )
return ch
def _snake_case ( ) -> Optional[Any]:
lowerCAmelCase__ = get_raw_chars()
if ord(A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(A ) == KEYMAP["esc"]:
lowerCAmelCase__ = get_raw_chars()
if ord(A ) == KEYMAP["mod_int"]:
lowerCAmelCase__ = get_raw_chars()
if ord(A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 90 |
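The key handling above distinguishes arrow keys from raw characters by OR-ing in a flag bit; a small arithmetic sketch of that encoding:
ARROW_KEY_FLAG = 1 << 8            # same flag as in the row above
up_code = 65 + ARROW_KEY_FLAG      # 65 is ord("A"), the final byte of ESC [ A
assert up_code > 255               # shifted codes cannot collide with raw characters
assert up_code - ARROW_KEY_FLAG == ord("A")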
'''simple docstring'''
import re
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(lowerCAmelCase__ , A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 1 |
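A few illustrative checks of the regex above, run directly against a freshly compiled pattern:
import re

pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
assert pat.search("+918827897895")   # +91 prefix, number starts with 8
assert pat.search("9876543210")      # bare ten digits starting with 9
assert not pat.search("1234567890")  # first digit must be 7, 8 or 9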
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[int] = ["pixel_values"]
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BILINEAR , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 2_55 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = size if size is not None else {'''shortest_edge''': 2_56}
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
lowerCAmelCase__ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , param_name='''crop_size''' )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase__ = get_resize_output_image_size(lowerCamelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCamelCase_ )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ ) -> np.ndarray:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> Dict:
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , param_name='''crop_size''' )
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[Any]:
lowerCAmelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCamelCase_ ):
lowerCAmelCase__ = target_sizes.numpy()
lowerCAmelCase__ = []
for idx in range(len(lowerCamelCase_ ) ):
lowerCAmelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCamelCase_ )
lowerCAmelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase_ )
else:
lowerCAmelCase__ = logits.argmax(dim=1 )
lowerCAmelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 90 |
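A pure-arithmetic sketch of the `shortest_edge` resize rule used by the processor above: scale the image so the shorter side hits the target while keeping the aspect ratio (a standalone illustration, not the library helper itself):
def shortest_edge_size(h, w, shortest_edge=256):
    scale = shortest_edge / min(h, w)
    return round(h * scale), round(w * scale)

assert shortest_edge_size(480, 640) == (256, 341)  # shorter side pinned to 256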
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__UpperCAmelCase = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 1 |
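The special-token layout the sequence-building method above produces (single sequence: cls + tokens + eos; pair: cls + a + eos + b + eos), shown with hypothetical ids — the real ids come from vocab.txt:
cls_id, eos_id = 0, 2                 # assumed ids for <cls> and <eos>
tokens_a, tokens_b = [5, 6], [7]
single = [cls_id] + tokens_a + [eos_id]
pair = [cls_id] + tokens_a + [eos_id] + tokens_b + [eos_id]
assert single == [0, 5, 6, 2] and pair == [0, 5, 6, 2, 7, 2]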
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_00 , lowerCamelCase_=13 , lowerCamelCase_=30 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=32 , lowerCamelCase_=4 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=10 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=None , lowerCamelCase_=[0, 1, 2, 3] , ) -> List[str]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = 1_00
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = out_indices
lowerCAmelCase__ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 1
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = BeitModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = BeitForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = BeitForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = BeitForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = BeitForSemanticSegmentation(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ : List[Any] = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ : Tuple = False
lowercase__ : Dict = False
lowercase__ : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = BeitModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase_ ), BeitForMaskedImageModeling]:
continue
lowerCAmelCase__ = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCAmelCase__ = model(**lowerCamelCase_ ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase__ = False
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase__ = model_class(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase_ )
model.train()
lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCAmelCase__ = model(**lowerCamelCase_ ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = BeitModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def _snake_case ( ) -> Optional[int]:
lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
# prepare bool_masked_pos
lowerCAmelCase__ = torch.ones((1, 1_96) , dtype=torch.bool ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(pixel_values=lowerCamelCase_ , bool_masked_pos=lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase_ , atol=1e-2 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
lowerCAmelCase__ = 2_81
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
lowerCAmelCase__ = 23_96
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = BeitImageProcessor(do_resize=lowerCamelCase_ , size=6_40 , do_center_crop=lowerCamelCase_ )
lowerCAmelCase__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase__ = Image.open(ds[0]['''file'''] )
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowerCAmelCase__ = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=lowerCamelCase_ , )
else:
lowerCAmelCase__ = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=lowerCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = BeitImageProcessor(do_resize=lowerCamelCase_ , size=6_40 , do_center_crop=lowerCamelCase_ )
lowerCAmelCase__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase__ = Image.open(ds[0]['''file'''] )
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits.detach().cpu()
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ , target_sizes=[(5_00, 3_00)] )
lowerCAmelCase__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ )
lowerCAmelCase__ = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ ) | 90 |
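The sequence-length bookkeeping behind the BEiT tester above, as a standalone check: a ViT-style encoder sees (image_size // patch_size)² patches plus one [CLS] token:
image_size, patch_size = 30, 2        # the tester's defaults above
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1
assert (num_patches, seq_length) == (225, 226)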
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AltDiffusionPipeline
lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 1 |
'''simple docstring'''
def _snake_case ( A ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
lowerCAmelCase__ = sum(A ) / len(A ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(A )
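# Added sketch (hedged): a clean, runnable reference for the mean-absolute-deviation
# computation above, under assumed readable names -- the row's own identifiers are
# obfuscated, so this mirrors its logic rather than calling it.
def _mean_absolute_deviation(nums: list[float]) -> float:
    average = sum(nums) / len(nums)  # arithmetic mean
    return sum(abs(x - average) for x in nums) / len(nums)

# Worked example: mean([1, 2, 3, 4]) = 2.5, deviations 1.5 + 0.5 + 0.5 + 1.5 = 4.0, so MAD = 1.0.
assert _mean_absolute_deviation([1, 2, 3, 4]) == 1.0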
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
'''simple docstring'''
def _snake_case ( A , A ) -> int:
return x if y == 0 else greatest_common_divisor(A , x % y )
def _snake_case ( A , A ) -> int:
return (x * y) // greatest_common_divisor(A , A )
def _snake_case ( A = 20 ) -> int:
lowerCAmelCase__ = 1
for i in range(1 , n + 1 ):
lowerCAmelCase__ = lcm(A , A )
return g
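# Added sketch (hedged): a self-contained reference for the lcm fold above (Project
# Euler 5: smallest number evenly divisible by all of 1..n); names are assumptions.
from math import gcd

def _smallest_multiple(n: int = 20) -> int:
    result = 1
    for i in range(1, n + 1):
        result = result * i // gcd(result, i)  # result = lcm(result, i)
    return result

assert _smallest_multiple(10) == 2520  # the worked example from the problem statement
assert _smallest_multiple(20) == 232792560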
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = "blip_text_model"
def __init__( self , lowerCamelCase_=3_05_24 , lowerCamelCase_=7_68 , lowerCamelCase_=7_68 , lowerCamelCase_=30_72 , lowerCamelCase_=7_68 , lowerCamelCase_=12 , lowerCamelCase_=8 , lowerCamelCase_=5_12 , lowerCamelCase_="gelu" , lowerCamelCase_=1e-12 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=3_05_22 , lowerCamelCase_=2 , lowerCamelCase_=0 , lowerCamelCase_=1_02 , lowerCamelCase_=True , lowerCamelCase_=True , **lowerCamelCase_ , ) -> Any:
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , sep_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = encoder_hidden_size
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = is_decoder
lowerCAmelCase__ = use_cache
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , **lowerCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCamelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
lowerCAmelCase__ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[int] = "blip_vision_model"
def __init__( self , lowerCamelCase_=7_68 , lowerCamelCase_=30_72 , lowerCamelCase_=5_12 , lowerCamelCase_=12 , lowerCamelCase_=12 , lowerCamelCase_=3_84 , lowerCamelCase_=16 , lowerCamelCase_="gelu" , lowerCamelCase_=1e-5 , lowerCamelCase_=0.0 , lowerCamelCase_=1e-10 , **lowerCamelCase_ , ) -> List[str]:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = hidden_act
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , **lowerCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCamelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
lowerCAmelCase__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[int] = "blip"
lowercase__ : Union[str, Any] = True
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=5_12 , lowerCamelCase_=2.6_592 , lowerCamelCase_=2_56 , **lowerCamelCase_ , ) -> Dict:
super().__init__(**lowerCamelCase_ )
if text_config is None:
lowerCAmelCase__ = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
lowerCAmelCase__ = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
lowerCAmelCase__ = BlipTextConfig(**lowerCamelCase_ )
lowerCAmelCase__ = BlipVisionConfig(**lowerCamelCase_ )
lowerCAmelCase__ = self.vision_config.hidden_size
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = logit_scale_init_value
lowerCAmelCase__ = 1.0
lowerCAmelCase__ = 0.02
lowerCAmelCase__ = image_text_hidden_size
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.text_config.to_dict()
lowerCAmelCase__ = self.vision_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output | 90 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _snake_case ( A , A=None , A=None , A=None ) -> Union[str, Any]:
lowerCAmelCase__ = True
while ask_again:
lowerCAmelCase__ = input(A )
try:
if default is not None and len(A ) == 0:
return default
return convert_value(A ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(A )
def _snake_case ( A , A=[] , A=None , A=0 ) -> List[Any]:
lowerCAmelCase__ = BulletMenu(A , A )
lowerCAmelCase__ = menu.run(default_choice=A )
return convert_value(A ) if convert_value is not None else result
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _snake_case ( A ) -> str:
lowerCAmelCase__ = int(A )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _snake_case ( A ) -> List[str]:
return {"yes": True, "no": False}[value.lower()]
class a__ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = usage.replace('''<command> [<args>] ''' , '''''' )
return usage | 90 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model, reducing its layers and embedding
# dimensions to the minimum, but keeping the full vocab + merges files, which leads to ~3MB in total.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(vocab, range(len(vocab))))
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = Path(tmpdirname)
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
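    # Added comment (hedged): the wiring above is the standard VQ-VAE round trip --
    # encode() maps images to continuous latents, quantize snaps them to the nearest
    # codebook vectors, and decode() reconstructs images from the (quantized) latents.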
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(lowerCamelCase_ )
lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents
lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ ) | 90 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowercase__ : str = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
lowerCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ , num_return_sequences=2 , return_tensors=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
] , )
lowerCAmelCase__ = text_generator.model.config.eos_token_id
lowerCAmelCase__ = '''<pad>'''
lowerCAmelCase__ = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=lowerCamelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCamelCase_ , )
self.assertEqual(
lowerCamelCase_ , [
[
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
],
[
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
],
] , )
@require_tf
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = TextGenerationPipeline(model=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
return text_generator, ["This is a test", "Another test"]
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = '''Hello I believe in'''
lowerCAmelCase__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ = text_generator(lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ = text_generator(lowerCamelCase_ , stop_sequence=''' fe''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = text_generator.model
lowerCAmelCase__ = text_generator.tokenizer
lowerCAmelCase__ = text_generator('''This is a test''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ = text_generator('''This is a test''' , return_full_text=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ = pipeline(task='''text-generation''' , model=lowerCamelCase_ , tokenizer=lowerCamelCase_ , return_full_text=lowerCamelCase_ )
lowerCAmelCase__ = text_generator('''This is a test''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ = text_generator('''This is a test''' , return_full_text=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
] , )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_full_text=lowerCamelCase_ , return_text=lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_full_text=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_text=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
# An empty prompt is slightly special:
# it requires a BOS token to exist.
# Pegasus is a special case: it always appends an EOS token, so it
# works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ = text_generator('''''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
lowerCAmelCase__ = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowerCamelCase_ ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
import torch
# Classic `model_kwargs`
lowerCAmelCase__ = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ = pipe('''This is a test''' )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else).
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ = pipe('''This is a test''' )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ = pipe('''This is a test''' )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
import torch
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
import torch
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=lowerCamelCase_ , top_p=0.5 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = '''Hello world'''
lowerCAmelCase__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ = logging.get_logger('''transformers.generation.utils''' )
lowerCAmelCase__ = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowerCamelCase_ ) as cl:
lowerCAmelCase__ = text_generator(lowerCamelCase_ , max_length=10 , max_new_tokens=1 )
self.assertIn(lowerCamelCase_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(lowerCamelCase_ ) as cl:
lowerCAmelCase__ = text_generator(lowerCamelCase_ , max_new_tokens=1 )
self.assertNotIn(lowerCamelCase_ , cl.out )
with CaptureLogger(lowerCamelCase_ ) as cl:
lowerCAmelCase__ = text_generator(lowerCamelCase_ , max_length=10 )
self.assertNotIn(lowerCamelCase_ , cl.out ) | 90 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _snake_case ( A , A , A , A ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
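    # (row - row % 3, column - column % 3) is the top-left cell of the 3x3 box that
    # contains (row, column); the nested loops below scan that box for a duplicate.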
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _snake_case ( A ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _snake_case ( A ) -> Matrix | None:
if location := find_empty_location(A ):
lowerCAmelCase__ , lowerCAmelCase__ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(A , A , A , A ):
lowerCAmelCase__ = digit
if sudoku(A ) is not None:
return grid
lowerCAmelCase__ = 0
return None
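# Note (added): the solver above is plain depth-first backtracking -- try each digit,
# recurse, and reset the cell to 0 on failure -- so the worst case is exponential in
# the number of empty cells, which is still fast for 9x9 puzzles.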
def _snake_case ( A ) -> None:
for row in grid:
for cell in row:
print(A , end=''' ''' )
print()
if __name__ == "__main__":
# run the solver on a solvable grid and on one with no solution
# (note: sudoku() fills the grid in place, so the original values are overwritten)
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__UpperCAmelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase = logging.getLogger()
__UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class a__ ( a__ ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
lowerCAmelCase__ = {'''source''': '''What is love ?''', '''target''': '''life'''}
lowerCAmelCase__ = {'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase__ = '''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCamelCase_ , F"""{split}.{field}""" ) , '''w''' ) as f:
f.write(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = "pytorch" ) -> List[str]:
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , '''output''' )
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , '''data''' )
self._create_dummy_data(data_dir=lowerCamelCase_ )
lowerCAmelCase__ = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
lowerCAmelCase__ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCamelCase_ , env=self.get_env() )
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , '''metrics.json''' )
with open(lowerCamelCase_ ) as f:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
return result
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 ) | 90 |
'''simple docstring'''
def _snake_case ( A ) -> int:
if n == 1 or not isinstance(A , A ):
return 0
elif n == 2:
return 1
else:
lowerCAmelCase__ = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _snake_case ( A ) -> int:
lowerCAmelCase__ = 0
lowerCAmelCase__ = 2
while digits < n:
index += 1
lowerCAmelCase__ = len(str(fibonacci(A ) ) )
return index
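# Added sketch (hedged): the helper above recomputes the whole sequence for every
# candidate index, which is quadratic overall; one incremental pass suffices.
# Names are assumptions, not part of the original row.
def _first_fib_index_with_digits(n: int) -> int:
    a, b, index = 1, 1, 2  # F(1) = F(2) = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert _first_fib_index_with_digits(3) == 12  # F(12) = 144 is the first 3-digit term
# for n = 1000 this returns 4782, the well-known Project Euler 25 answer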
def _snake_case ( A = 1000 ) -> int:
return fibonacci_digits_index(A )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCAmelCase = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ) -> Any:
lowerCAmelCase__ = None
lowerCAmelCase__ = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCAmelCase__ = os.path.abspath('''examples''' )
for item in os.listdir(lowerCamelCase_ ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCamelCase_ , feature_script=lowerCamelCase_ , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCAmelCase__ = compare_against_test(
os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = '''\n'''.join(lowerCamelCase_ )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase__ = diff.replace(lowerCamelCase_ , '''''' )
self.assertEqual(lowerCamelCase_ , '''''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.one_complete_example('''complete_nlp_example.py''' , lowerCamelCase_ )
self.one_complete_example('''complete_nlp_example.py''' , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCAmelCase__ = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
self.one_complete_example('''complete_cv_example.py''' , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = False
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> Optional[Any]:
super().setUpClass()
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> str:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase_ )
self.assertNotIn('''epoch 0:''' , lowerCamelCase_ )
self.assertIn('''epoch 1:''' , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase_ )
if torch.cuda.is_available():
lowerCAmelCase__ = torch.cuda.device_count()
else:
lowerCAmelCase__ = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , lowerCamelCase_ )
self.assertIn('''epoch 1:''' , lowerCamelCase_ )
else:
self.assertIn('''epoch 0:''' , lowerCamelCase_ )
self.assertIn('''epoch 1:''' , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase_ )
lowerCAmelCase__ = re.findall('''({.+})''' , lowerCamelCase_ )
lowerCAmelCase__ = [r for r in results if '''accuracy''' in r][-1]
lowerCAmelCase__ = ast.literal_eval(lowerCamelCase_ )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase__ = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''tracking''' ) ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs ) | 90 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def _snake_case ( A ) -> int:
return choice(A )
def _snake_case ( A , A ) -> int:
lowerCAmelCase__ = random_pivot(A )
# partition based on pivot
# linear time
lowerCAmelCase__ = [e for e in lst if e < pivot]
lowerCAmelCase__ = [e for e in lst if e > pivot]
# if we get lucky, the pivot is exactly the element we want;
# the partition splits the list as:
# small (elements smaller than the pivot)
# + pivot (the kth smallest element when len(small) == k - 1)
# + big (elements larger than the pivot)
if len(A ) == k - 1:
return pivot
# the kth smallest element is among those bigger than the pivot
elif len(A ) < k - 1:
return kth_number(A , k - len(A ) - 1 )
# the kth smallest element is among those smaller than the pivot
else:
return kth_number(A , A )
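# Added sketch (hedged): the partition above keeps only elements strictly smaller or
# strictly larger than the pivot, so duplicates of the pivot are silently dropped
# (e.g. a list like [3, 3, 3] can recurse on an empty list). A three-way variant that
# keeps the equal bucket, under assumed names:
def _kth_number(lst: list[int], k: int) -> int:
    pivot = lst[len(lst) // 2]
    small = [e for e in lst if e < pivot]
    equal = [e for e in lst if e == pivot]
    big = [e for e in lst if e > pivot]
    if k <= len(small):
        return _kth_number(small, k)
    if k <= len(small) + len(equal):
        return pivot
    return _kth_number(big, k - len(small) - len(equal))

assert _kth_number([3, 3, 3], 2) == 3
assert _kth_number([5, 1, 4, 2, 3], 3) == 3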
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def _snake_case ( ) -> Optional[Any]:
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=A , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=A , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=A , help='''where to store parsed gold_data_path file''' , )
lowerCAmelCase__ = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
lowerCAmelCase__ = json.load(A )
for dpr_record in tqdm(A ):
lowerCAmelCase__ = dpr_record['''question''']
lowerCAmelCase__ = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(A ) + '''\n''' )
if __name__ == "__main__":
main() | 90 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 1 |
'''simple docstring'''
__UpperCAmelCase = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 90 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')
@dataclass(frozen=a__ , slots=a__ )
class a__ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowercase__ : KEY
lowercase__ : VAL
class a__ ( _Item ):
'''simple docstring'''
def __init__( self ) -> None:
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __bool__( self ) -> bool:
return False
__UpperCAmelCase = _DeletedItem()
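# The sentinel above acts as a "tombstone": deleted slots store it instead of None so
# that linear-probe chains passing through them are not broken for later lookups.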
class a__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None:
lowerCAmelCase__ = initial_block_size
lowerCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ = capacity_factor
lowerCAmelCase__ = 0
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return hash(lowerCamelCase_ ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return (ind + 1) % len(self._buckets )
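    # The two helpers above implement open addressing with linear probing: hash the key
    # to a starting bucket, then step forward one slot at a time (wrapping around) until
    # a free slot or the matching key is found.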
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
lowerCAmelCase__ = self._buckets[ind]
if not stored:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self._buckets
lowerCAmelCase__ = [None] * new_size
lowerCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]:
lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
break
def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_ , lowerCamelCase_ )
def __delitem__( self , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , lowerCamelCase_ ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
lowerCAmelCase__ = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})""" | 90 | 1 |
'''simple docstring'''
def _snake_case ( A , A ) -> int:
lowerCAmelCase__ = [0 for i in range(r + 1 )]
# nc0 = 1
lowerCAmelCase__ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowerCAmelCase__ = min(A , A )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
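# Added check (hedged): the rolling 1-D table above applies Pascal's rule in place,
# sweeping j downwards so each row reuses the previous one. A stdlib cross-check:
from math import comb

assert comb(10, 5) == 252  # the expected value of C(10, 5) printed below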
print(binomial_coefficient(n=10, r=5)) | 90 |
'''simple docstring'''
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = OmegaConf.load(A )
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model''']
lowerCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''first_stage_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''model.diffusion_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
lowerCAmelCase__ = config.model.params.first_stage_config.params
lowerCAmelCase__ = config.model.params.unet_config.params
lowerCAmelCase__ = VQModel(**A ).eval()
vqvae.load_state_dict(A )
lowerCAmelCase__ = UNetLDMModel(**A ).eval()
unet.load_state_dict(A )
lowerCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , )
lowerCAmelCase__ = LDMPipeline(A , A , A )
pipeline.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
__UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class a__ ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ('''foo.json''',)] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = GenerationConfig(
do_sample=lowerCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_ , config_name=lowerCamelCase_ )
lowerCAmelCase__ = GenerationConfig.from_pretrained(lowerCamelCase_ , config_name=lowerCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowerCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = AutoConfig.from_pretrained('''gpt2''' )
lowerCAmelCase__ = GenerationConfig.from_model_config(lowerCamelCase_ )
lowerCAmelCase__ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = GenerationConfig()
lowerCAmelCase__ = {
'''max_new_tokens''': 10_24,
'''foo''': '''bar''',
}
lowerCAmelCase__ = copy.deepcopy(lowerCamelCase_ )
lowerCAmelCase__ = generation_config.update(**lowerCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCamelCase_ , {'''foo''': '''bar'''} )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = GenerationConfig()
lowerCAmelCase__ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = GenerationConfig.from_pretrained(lowerCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCAmelCase__ = GenerationConfig.from_model_config(lowerCamelCase_ )
assert not hasattr(lowerCamelCase_ , '''foo''' ) # no new kwargs should be initialized if from config
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowerCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ = GenerationConfig(
do_sample=lowerCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowerCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = GenerationConfig.from_pretrained(lowerCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowerCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class a__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> Any:
lowerCAmelCase__ = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = GenerationConfig(
do_sample=lowerCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCAmelCase__ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_ , repo_id='''test-generation-config''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
lowerCAmelCase__ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = GenerationConfig(
do_sample=lowerCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCAmelCase__ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
lowerCAmelCase__ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) | 90 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = v.to_dict()
return d | 90 | 1 |
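A minimal sketch of the `to_dict` behavior defined above, assuming a recent `transformers` version in which `Seq2SeqTrainingArguments` accepts a `generation_config` argument; the output directory is a placeholder:
from transformers import GenerationConfig, Seq2SeqTrainingArguments
# Sketch only: the nested GenerationConfig field is itself serialized to a
# plain dict by the to_dict() override above. "out" is a placeholder path.
args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_config=GenerationConfig(max_new_tokens=32),
)
serialized = args.to_dict()
assert isinstance(serialized["generation_config"], dict)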
'''simple docstring'''
def _snake_case ( A ) -> float:
return 10 - x * x
def _snake_case ( A , A ) -> float:
# Bolzano's theorem: if equation(a) and equation(b) have opposite signs, a root lies between a and b
if equation(A ) * equation(A ) >= 0:
raise ValueError('''Wrong space!''' )
lowerCAmelCase__ = a
while (b - a) >= 0.01:
# Find middle point
lowerCAmelCase__ = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
lowerCAmelCase__ = c
else:
lowerCAmelCase__ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 90 |
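For readability, a standalone restatement of the loop above with unmangled names; illustrative only, the function `bisection_demo` is a name chosen here:
# Readable restatement of the bisection routine above.
def bisection_demo(f, a: float, b: float, tol: float = 0.01) -> float:
    if f(a) * f(b) >= 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    while (b - a) >= tol:
        c = (a + b) / 2  # midpoint
        if f(c) == 0.0:
            return c
        if f(a) * f(c) < 0:
            b = c  # root lies in [a, c]
        else:
            a = c  # root lies in [c, b]
    return (a + b) / 2
print(bisection_demo(lambda x: 10 - x * x, 0, 6))  # ~3.1623, a root of 10 - x**2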
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def _snake_case ( A , A , A ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
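A small companion sketch of the forward formula the solver above inverts, X_L = 2*pi*f*L; the function name `inductive_reactance` is chosen here for illustration:
from math import pi
# Hypothetical helper name; computes the inductive reactance directly.
def inductive_reactance(inductance: float, frequency: float) -> float:
    return 2 * pi * frequency * inductance
print(inductive_reactance(35e-3, 1e3))  # ~219.91 ohms for 35 mH at 1 kHz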
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = str(A )
return len(A ) == 9 and set(A ) == set('''123456789''' )
def _snake_case ( ) -> int | None:
for base_num in range(9999 , 4999 , -1 ):
lowerCAmelCase__ = 100002 * base_num
if is_9_pandigital(A ):
return candidate
for base_num in range(333 , 99 , -1 ):
lowerCAmelCase__ = 1002003 * base_num
if is_9_pandigital(A ):
return candidate
return None
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__UpperCAmelCase = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def _snake_case ( A , A , A=None ) -> List[str]:
if rng is None:
lowerCAmelCase__ = random.Random()
lowerCAmelCase__ = 1
for dim in shape:
total_dims *= dim
lowerCAmelCase__ = []
for _ in range(A ):
values.append(rng.randint(0 , vocab_size - 1 ) )
lowerCAmelCase__ = np.array(A , dtype=jnp.intaa ).reshape(A )
return output
def _snake_case ( A , A=None ) -> Union[str, Any]:
lowerCAmelCase__ = ids_tensor(A , vocab_size=2 , rng=A )
# make sure that at least one token is attended to for each batch
lowerCAmelCase__ = 1
return attn_mask
@require_flax
class a__ :
'''simple docstring'''
lowercase__ : Optional[Any] = None
lowercase__ : Union[str, Any] = ()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 2
lowerCAmelCase__ = 2
lowerCAmelCase__ = inputs['''input_ids'''].shape[-1] // 2
lowerCAmelCase__ = inputs['''input_ids'''][:max_batch_size, :sequence_length]
lowerCAmelCase__ = jnp.ones_like(lowerCamelCase_ )
lowerCAmelCase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowerCAmelCase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowerCAmelCase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 0
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase__ = getattr(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = pt_model_class(lowerCamelCase_ ).eval()
lowerCAmelCase__ = load_flax_weights_in_pytorch_model(lowerCamelCase_ , flax_model.params )
lowerCAmelCase__ = flax_model.generate(lowerCamelCase_ ).sequences
lowerCAmelCase__ = pt_model.generate(torch.tensor(lowerCamelCase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCAmelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = True
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = True
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 0.8
lowerCAmelCase__ = 10
lowerCAmelCase__ = 0.3
lowerCAmelCase__ = 1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ = True
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ = 2
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
lowerCAmelCase__ = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ = '''Hello world'''
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCamelCase_ , '''do_samples''' ):
model.generate(lowerCamelCase_ , do_samples=lowerCamelCase_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCamelCase_ , '''foo''' ):
lowerCAmelCase__ = {'''foo''': '''bar'''}
model.generate(lowerCamelCase_ , **lowerCamelCase_ ) | 90 |
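The jit-vs-eager equality assertions above follow the standard JAX contract; a standalone illustration:
import jax
import jax.numpy as jnp
# A jitted function must produce the same outputs as its eager counterpart.
def step(x):
    return jnp.cumsum(x) % 7
x = jnp.arange(12)
assert jax.jit(step)(x).tolist() == step(x).tolist()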
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This contrasts with taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB in total.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(vocab, range(len(vocab))))
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = Path(tmpdirname)
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 1 |
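Downstream, the artifact can be consumed like any other checkpoint; a hedged sketch assuming the upload step at the end was run under the model id printed above:
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
# Assumes the model was uploaded as "stas/tiny-wmt19-en-ru" as in the script.
tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
batch = tok(["Making tiny model"], return_tensors="pt")
out = model.generate(**batch, max_new_tokens=4)
print(tok.batch_decode(out, skip_special_tokens=True))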
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class a__ ( a__ ):
'''simple docstring'''
def __lt__( self , lowerCamelCase_ ) -> List[Any]:
return self[-1] < other[-1]
def __eq__( self , lowerCamelCase_ ) -> str:
return self[-1] == other[-1]
def _snake_case ( A ) -> list:
lowerCAmelCase__ = []
# sort into stacks
for element in collection:
lowerCAmelCase__ = Stack([element] )
lowerCAmelCase__ = bisect_left(A , A )
if i != len(A ):
stacks[i].append(A )
else:
stacks.append(A )
# use a heap-based k-way merge to combine the stacks efficiently
lowerCAmelCase__ = merge(*(reversed(A ) for stack in stacks) )
return collection
if __name__ == "__main__":
__UpperCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted)) | 90 |
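The heap-based merge at the end is the key step: each pile is kept in descending order, so it is reversed (ascending) before the k-way merge. In isolation:
from heapq import merge
# Illustrative piles, each descending as the algorithm above maintains them.
stacks = [[5, 3, 1], [4, 2]]
print(list(merge(*(reversed(s) for s in stacks))))  # [1, 2, 3, 4, 5]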
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _snake_case ( ) -> Union[str, Any]:
raise RuntimeError('''CUDA out of memory.''' )
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> int:
super().__init__()
lowerCAmelCase__ = nn.Linear(3 , 4 )
lowerCAmelCase__ = nn.BatchNormad(4 )
lowerCAmelCase__ = nn.Linear(4 , 5 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_ ) ) )
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowerCAmelCase__ , lowerCAmelCase__ = mock_training_loop_function('''hello''' )
self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase_ ):
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase_ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function(1_28 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase_ ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = torch.cuda.memory_allocated()
lowerCAmelCase__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_ )
lowerCAmelCase__ = release_memory(lowerCamelCase_ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_ ) | 90 | 1 |
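Outside the test harness, the decorator is used the same way in real training code; a minimal sketch in which the loop body is a placeholder:
from accelerate.utils.memory import find_executable_batch_size
@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # On a CUDA OOM, accelerate halves batch_size and re-invokes the function.
    print(f"trying batch_size={batch_size}")
    # ... build the dataloader with `batch_size` and run an epoch here ...
training_loop()  # called with no argument; batch_size is injected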
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(lowerCamelCase_ )
lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents
lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ ) | 90 |
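A hedged round-trip sketch, assuming the class above is diffusers' `VQModel` with the default configuration shown in `__init__` (3 in/out channels, one non-downsampling block, so spatial dimensions are preserved):
import torch
from diffusers import VQModel
model = VQModel()
x = torch.randn(1, 3, 32, 32)
latents = model.encode(x).latents      # VQEncoderOutput.latents
recon = model.decode(latents).sample   # quantize + post_quant_conv + decoder
assert recon.shape == x.shape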
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCAmelCase = logging.getLogger(__name__)
def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]:
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCAmelCase__ = []
# custom device map
if isinstance(A , A ) and len(device_map.keys() ) > 1:
lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase__ = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
lowerCAmelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
lowerCAmelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase__ = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase__ = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
lowerCAmelCase__ = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase__ = True
lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]:
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase__ = {}
lowerCAmelCase__ = special_dtypes
lowerCAmelCase__ = no_split_module_classes
lowerCAmelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase__ = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
lowerCAmelCase__ = max_memory
lowerCAmelCase__ = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check that no quantized module ends up on the CPU or the disk
lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def _snake_case ( A , A , A=None , A=None ) -> Any:
if modules_to_not_convert is None:
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]:
lowerCAmelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase__ = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase__ = '''.'''.join(A )
lowerCAmelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase__ = False
break
if proceed:
# Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCAmelCase__ = module.weight.data
if module.bias is not None:
lowerCAmelCase__ = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
lowerCAmelCase__ = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
lowerCAmelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _snake_case ( A ) -> Tuple:
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
lowerCAmelCase__ = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase__ = sum(A , [] )
lowerCAmelCase__ = len(A ) > 0
# Check if it is a base model
lowerCAmelCase__ = False
if hasattr(A , '''base_model_prefix''' ):
lowerCAmelCase__ = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase__ = list(model.named_children() )
lowerCAmelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase__ = set(A ) - set(A )
lowerCAmelCase__ = list(set(A ) ) + list(A )
# remove ".weight" from the keys
lowerCAmelCase__ = ['''.weight''', '''.bias''']
lowerCAmelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase__ = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
def _snake_case ( A ) -> Optional[int]:
for m in model.modules():
if isinstance(A , bnb.nn.Linearabit ):
return True
return False
def _snake_case ( A ) -> Union[str, Any]:
return next(parameter.parameters() ).device
def _snake_case ( A , A , A , A , A , A , A ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
lowerCAmelCase__ = param_name
lowerCAmelCase__ = model
if "." in tensor_name:
lowerCAmelCase__ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCAmelCase__ = getattr(A , A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
lowerCAmelCase__ = new_module
lowerCAmelCase__ = splits[-1]
# offload weights
lowerCAmelCase__ = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) ) | 90 | 1 |
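These helpers back accelerate's public quantization entry point; a hedged sketch of the intended call pattern, where `MyModel` and the weights path are placeholders:
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
with init_empty_weights():
    empty_model = MyModel()  # hypothetical model class
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",  # placeholder
    device_map="auto",
)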
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A ) -> bool:
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
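Restated with a readable name for clarity:
# Duplicates collapse in the set, so the lengths differ iff a repeat exists.
def all_unique(items) -> bool:
    return len(set(items)) == len(items)
print(all_unique([1, 2, 3]))  # True
print(all_unique([1, 2, 2]))  # False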
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _snake_case ( A , A , A , A , A ) -> np.array:
lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(A ):
lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] )
lowerCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
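The loop above is Heun's method (an Euler predictor followed by a trapezoidal corrector); a standalone restatement with a quick check on y' = y, y(0) = 1:
import numpy as np
def heun(f, x0: float, y0: float, x_end: float, h: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        pred = y[k] + h * f(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (h / 2) * (f(x, y[k]) + f(x + h, pred))  # corrector
        x += h
    return y
print(heun(lambda x, y: y, 0.0, 1.0, 1.0, 0.01)[-1])  # ~2.71825 (exact: e)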
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=14 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> Optional[Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = use_mc_token_ids
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = self.vocab_size - 1
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
if self.use_mc_token_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
lowerCAmelCase__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = CTRLModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ )
model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = CTRLLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = CTRLForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase__ : Dict = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ : int = True
lowercase__ : Optional[Any] = False
lowercase__ : str = False
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = CTRLModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
pass
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = CTRLModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@require_torch
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=lowerCamelCase_ ) # Legal the president is
lowerCAmelCase__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCAmelCase__ = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , lowerCamelCase_ ) | 90 |
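The slow greedy-decoding test above corresponds to ordinary usage; a hedged sketch ("ctrl" is a ~1.6B-parameter checkpoint, and "Legal" is one of CTRL's control codes):
import torch
from transformers import AutoTokenizer, CTRLLMHeadModel
tokenizer = AutoTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")
inputs = tokenizer("Legal the president is", return_tensors="pt")
with torch.no_grad():
    out = model.generate(inputs["input_ids"], do_sample=False, max_new_tokens=16)
print(tokenizer.decode(out[0]))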
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict:
lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCamelCase_ ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def _snake_case ( A , A = 16 ) -> Any:
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
lowerCAmelCase__ = load_dataset('''csv''' , data_files=A )
lowerCAmelCase__ = datasets['''train'''].unique('''label''' )
lowerCAmelCase__ = {v: i for i, v in enumerate(A )}
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' )
if "label" in examples:
lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ = datasets.map(
A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 )
lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 )
return train_dataloader, eval_dataloader | 90 | 1 |
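The padding split in `collate_fn` above is the interesting detail: static shapes for TPU (to avoid XLA recompilation), dynamic padding elsewhere. In isolation:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
features = [tokenizer("short"), tokenizer("a somewhat longer sentence")]
static = tokenizer.pad(features, padding="max_length", max_length=128, return_tensors="pt")
dynamic = tokenizer.pad(features, padding="longest", return_tensors="pt")
print(static["input_ids"].shape)   # torch.Size([2, 128]) - fixed width for TPU
print(dynamic["input_ids"].shape)  # width of the longest example in the batch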
'''simple docstring'''
def _snake_case ( A , A , A ) -> int:
if len(A ) != len(A ):
raise ValueError('''The length of profit and weight must be the same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must be greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
# Profit gained per 1 kg of each item:
# compute and collect the profit/weight ratio for every element.
lowerCAmelCase__ = [p / w for p, w in zip(A , A )]
# Creating a copy of the list and sorting profit/weight in ascending order
lowerCAmelCase__ = sorted(A )
# declaring useful variables
lowerCAmelCase__ = len(A )
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
# loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
# pick the greatest remaining ratio in sorted_profit_by_weight
lowerCAmelCase__ = sorted_profit_by_weight[length - i - 1]
lowerCAmelCase__ = profit_by_weight.index(A )
lowerCAmelCase__ = -1
# check whether the whole item fits into the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
# The whole item fits: add its full profit (taken fraction weight[index] / weight[index] == 1)
gain += 1 * profit[index]
else:
# The item does not fit entirely: take only the remaining capacity and add
# the proportional share of its profit,
# i.e. (max_weight - limit) / weight[index] of profit[index].
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
__UpperCAmelCase = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
__UpperCAmelCase = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
__UpperCAmelCase = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight) | 90 |
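A worked call for the greedy above: the ratios are 10, 8, 7.5 and ~8.33, so with capacity 10 it takes items 0 and 3 whole and 3 kg of item 1:
# profit/weight ratios: [10.0, 8.0, 7.5, 8.33...]; capacity 10 kg.
# Takes item 0 (1 kg, +10), item 3 (6 kg, +50), then 3/5 of item 1 (+24).
print(calc_profit([10, 40, 30, 50], [1, 5, 4, 6], 10))  # 84.0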
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__UpperCAmelCase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def _snake_case ( A , A=None ) -> Optional[Any]:
require_version(deps[pkg] , A ) | 90 | 1 |
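# Hedged sketch of the runtime check the loop above performs: read the installed
# version of a package and compare it against a pinned requirement. It assumes
# Python >= 3.8 (stdlib importlib.metadata) and that `packaging` is installed;
# the pin below is an example, not the project's real requirement.
from importlib.metadata import version
from packaging.version import Version

def require_at_least(pkg, minimum):
    installed = Version(version(pkg))
    if installed < Version(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, found {installed}")

require_at_least("packaging", "20.0")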
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 16 , lowerCamelCase_ = 88 , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = 1 , lowerCamelCase_ = 0.0 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = "geglu" , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> Tuple:
super().__init__()
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = attention_head_dim
lowerCAmelCase__ = num_attention_heads * attention_head_dim
lowerCAmelCase__ = in_channels
lowerCAmelCase__ = torch.nn.GroupNorm(num_groups=lowerCamelCase_ , num_channels=lowerCamelCase_ , eps=1e-6 , affine=lowerCamelCase_ )
lowerCAmelCase__ = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
# 3. Define transformers blocks
lowerCAmelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dropout=lowerCamelCase_ , cross_attention_dim=lowerCamelCase_ , activation_fn=lowerCamelCase_ , attention_bias=lowerCamelCase_ , double_self_attention=lowerCamelCase_ , norm_elementwise_affine=lowerCamelCase_ , )
for d in range(lowerCamelCase_ )
] )
lowerCAmelCase__ = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=1 , lowerCamelCase_=None , lowerCamelCase_ = True , ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_states.shape
lowerCAmelCase__ = batch_frames // num_frames
lowerCAmelCase__ = hidden_states
lowerCAmelCase__ = hidden_states[None, :].reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCAmelCase__ = self.norm(lowerCamelCase_ )
lowerCAmelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = self.proj_in(lowerCamelCase_ )
# 2. Blocks
for block in self.transformer_blocks:
lowerCAmelCase__ = block(
lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , timestep=lowerCamelCase_ , cross_attention_kwargs=lowerCamelCase_ , class_labels=lowerCamelCase_ , )
# 3. Output
lowerCAmelCase__ = self.proj_out(lowerCamelCase_ )
lowerCAmelCase__ = (
hidden_states[None, None, :]
.reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCAmelCase__ = hidden_states.reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=lowerCamelCase_ ) | 90 |
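# Small standalone check of the (batch*frames, C, H, W) <-> (batch, frames, C, H, W)
# bookkeeping used in the forward pass above; the shapes are toy values for the demo.
import torch

batch_size, num_frames, channels, height, width = 2, 4, 8, 16, 16
hidden = torch.randn(batch_size * num_frames, channels, height, width)

# split the fused batch axis back into (batch, frames), as the model does on entry
unfused = hidden[None, :].reshape(batch_size, num_frames, channels, height, width)
fused_again = unfused.reshape(batch_size * num_frames, channels, height, width)
assert torch.equal(hidden, fused_again)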
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]:
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _snake_case ( A , A ) -> List[str]:
for i in range(config.num_hidden_layers ):
lowerCAmelCase__ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _snake_case ( A ) -> List[str]:
lowerCAmelCase__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
def _snake_case ( A , A , A ) -> str:
lowerCAmelCase__ = dct.pop(A )
lowerCAmelCase__ = val
@torch.no_grad()
def _snake_case ( A , A ) -> Any:
lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
if mlm_model:
lowerCAmelCase__ = torch.Size([1, 11, 30522] )
lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ = torch.Size([1, 3129] )
lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
        assert outputs.logits.shape == expected_shape
        # logits is 2-D ([1, 3129]) here, so only the [0, :3] slice is valid
        assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
# verify vqa prediction equals "2"
lowerCAmelCase__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ = torch.Size([1, 2] )
lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 1 |
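# Sketch of the fused-QKV split that read_in_q_k_v performs above: timm stores one
# (3*hidden, hidden) projection, HF stores three (hidden, hidden) ones. Sizes are toy.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)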
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : Optional[str] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "The column name of the images in the files."} )
lowercase__ : Optional[str] = field(default=a__ , metadata={"help": "A folder containing the training data."} )
lowercase__ : Optional[str] = field(default=a__ , metadata={"help": "A folder containing the validation data."} )
lowercase__ : Optional[float] = field(
default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = {}
if self.train_dir is not None:
lowerCAmelCase__ = self.train_dir
if self.validation_dir is not None:
lowerCAmelCase__ = self.validation_dir
lowerCAmelCase__ = data_files if data_files else None
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : str = field(
default=a__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
lowercase__ : Optional[str] = field(
default=a__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowercase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase__ : str = field(default=a__ , metadata={"help": "Name or path of preprocessor config."} )
lowercase__ : bool = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase__ : float = field(
default=0.7_5 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : float = field(
default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def _snake_case ( A ) -> Optional[Any]:
lowerCAmelCase__ = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _snake_case ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , A , A )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(A )
transformers.utils.logging.set_verbosity(A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCAmelCase__ = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A ) and data_args.train_val_split > 0.0:
lowerCAmelCase__ = ds['''train'''].train_test_split(data_args.train_val_split )
lowerCAmelCase__ = split['''train''']
lowerCAmelCase__ = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCAmelCase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **A )
elif model_args.model_name_or_path:
lowerCAmelCase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **A )
else:
lowerCAmelCase__ = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowerCAmelCase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **A )
elif model_args.model_name_or_path:
lowerCAmelCase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **A )
else:
lowerCAmelCase__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowerCAmelCase__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowerCAmelCase__ = ViTMAEForPreTraining(A )
if training_args.do_train:
lowerCAmelCase__ = ds['''train'''].column_names
else:
lowerCAmelCase__ = ds['''validation'''].column_names
if data_args.image_column_name is not None:
lowerCAmelCase__ = data_args.image_column_name
elif "image" in column_names:
lowerCAmelCase__ = '''image'''
elif "img" in column_names:
lowerCAmelCase__ = '''img'''
else:
lowerCAmelCase__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCAmelCase__ = image_processor.size['''shortest_edge''']
else:
lowerCAmelCase__ = (image_processor.size['''height'''], image_processor.size['''width'''])
lowerCAmelCase__ = Compose(
[
Lambda(lambda A : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(A , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(A ):
lowerCAmelCase__ = [transforms(A ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowerCAmelCase__ = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(A )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(A )
# Compute absolute learning rate
lowerCAmelCase__ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowerCAmelCase__ = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowerCAmelCase__ = Trainer(
model=A , args=A , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=A , data_collator=A , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=A )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase__ = trainer.evaluate()
trainer.log_metrics('''eval''' , A )
trainer.save_metrics('''eval''' , A )
# Write model card and (optionally) push to hub
lowerCAmelCase__ = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A )
else:
trainer.create_model_card(**A )
def _snake_case ( A ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 90 |
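# The linear learning-rate scaling rule applied above, with invented example numbers:
# absolute_lr = base_lr * total_train_batch_size / 256 (the MAE paper's convention).
base_lr = 1.5e-4  # example value, not the script's default
per_device_batch, grad_accum, world_size = 64, 2, 8
total_batch = per_device_batch * grad_accum * world_size  # 1024
absolute_lr = base_lr * total_batch / 256
assert absolute_lr == 6e-4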
'''simple docstring'''
import re
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := lowerCAmelCase__.search(A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 1 |
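# A few illustrative checks of the pattern above, kept self-contained so they run
# on their own; the sample numbers are invented.
import re

PHONE_RE = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")

assert PHONE_RE.match("+918827897895")  # +91 prefix followed by a 10-digit number
assert PHONE_RE.match("9876543210")  # bare 10-digit number starting with 9
assert not PHONE_RE.match("123456789")  # wrong leading digit and too short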
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : List[str] = ["pixel_values"]
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 2_55 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> None:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = size if size is not None else {'''shortest_edge''': 2_24}
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
lowerCAmelCase__ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ , param_name='''crop_size''' )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase__ = do_convert_rgb
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase__ = get_resize_output_image_size(lowerCamelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCamelCase_ )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowerCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> str:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , param_name='''size''' , default_to_square=lowerCamelCase_ )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(lowerCamelCase_ , param_name='''crop_size''' , default_to_square=lowerCamelCase_ )
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ = [convert_to_rgb(lowerCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
lowerCAmelCase__ = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ ) | 90 |
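# Back-of-envelope version of the "shortest_edge" resize used above: scale the image
# so its shorter side hits the target while keeping the aspect ratio. The rounding
# here is a simplification; the library helper may round slightly differently.
def shortest_edge_size(height, width, shortest_edge):
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)

assert shortest_edge_size(480, 640, 224) == (224, 299)  # 640 * 224 / 480 is about 298.7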
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__UpperCAmelCase = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 1 |
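# Toy illustration of the special-token layout built above for a single sequence:
# <cls> + tokens + <eos>, plus the mask flagging the two special positions. The ids
# are fake stand-ins, not the real ESM vocabulary.
cls_id, eos_id = 0, 2
token_ids = [5, 6, 7]  # hypothetical protein-token ids

with_special = [cls_id] + token_ids + [eos_id]
special_mask = [1] + [0] * len(token_ids) + [1]
assert with_special == [0, 5, 6, 7, 2]
assert special_mask == [1, 0, 0, 0, 1]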
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.abspath(A )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
lowerCAmelCase__ = tf.train.list_variables(A )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowerCAmelCase__ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
lowerCAmelCase__ = name[1:]
# figure out how many levels deep the name is
lowerCAmelCase__ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(A )
# read data
lowerCAmelCase__ = tf.train.load_variable(A , A )
names.append('''/'''.join(A ) )
arrays.append(A )
logger.info(F"""Read a total of {len(A ):,} layers""" )
# Sanity check
if len(set(A ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(A ) )})""" )
lowerCAmelCase__ = list(set(A ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(A , A ):
lowerCAmelCase__ = full_name.split('''/''' )
lowerCAmelCase__ = model
lowerCAmelCase__ = []
for i, m_name in enumerate(A ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
lowerCAmelCase__ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''embeddings''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
lowerCAmelCase__ = getattr(A , '''encoder''' )
lowerCAmelCase__ = getattr(A , '''layer''' )
lowerCAmelCase__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''pooler''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
lowerCAmelCase__ = getattr(A , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
lowerCAmelCase__ = getattr(A , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
lowerCAmelCase__ = getattr(A , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
lowerCAmelCase__ = getattr(A , '''token_type_embeddings''' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
lowerCAmelCase__ = getattr(A , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_layer_norm":
                # output layer norm
trace.extend(['''output''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
lowerCAmelCase__ = getattr(A , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
lowerCAmelCase__ = getattr(A , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
lowerCAmelCase__ = getattr(A , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''intermediate''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_layer_norm":
                # output layer norm (unreachable: the identical "_output_layer_norm" branch above already matches)
trace.append('''output''' )
lowerCAmelCase__ = getattr(A , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
lowerCAmelCase__ = getattr(A , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
lowerCAmelCase__ = getattr(A , '''weight''' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
lowerCAmelCase__ = '''.'''.join(A )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , A ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , A ):
lowerCAmelCase__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowerCAmelCase__ = array.transpose()
if pointer.shape == array.shape:
lowerCAmelCase__ = torch.from_numpy(A )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def _snake_case ( A , A , A ) -> Any:
# Instantiate model
logger.info(F"""Loading model based on config from {config_path}...""" )
lowerCAmelCase__ = BertConfig.from_json_file(A )
lowerCAmelCase__ = BertModel(A )
# Load weights from checkpoint
logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(A , A , A )
# Save pytorch-model
logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
__UpperCAmelCase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 90 |
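# Sketch of the name bookkeeping the converter above relies on: count how deeply a
# TF2 variable path nests under "layer_with_weights" and recover the layer index.
# The variable name is an invented example in the same format.
name = "model/layer_with_weights-7/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
parts = name.split("/")[1:]  # drop the leading "model", as the converter does

depth = 0
for part in parts:
    if part.startswith("layer_with_weights"):
        depth += 1
    else:
        break
layer_num = int(parts[0].split("-")[-1])
assert (depth, layer_num) == (1, 7)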
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AltDiffusionPipeline
lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 1 |
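# The slice-comparison idiom the tests above rely on, in isolation: compare a small
# corner of the generated image against pinned reference values. The array contents
# here are invented, not real pipeline outputs.
import numpy as np

image = np.zeros((1, 64, 64, 3))
image[0, -3:, -3:, -1] = 0.5  # stand-in for a model output
image_slice = image[0, -3:, -3:, -1]

expected_slice = np.full(9, 0.5)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2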
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=3 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=2_24 , lowerCamelCase_=10_00 , lowerCamelCase_=[3, 3, 6, 4] , lowerCamelCase_=[48, 56, 1_12, 2_20] , ) -> Union[str, Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = layer_depths
lowerCAmelCase__ = embed_dims
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCamelCase_ , layer_scale_init_value=1e-5 , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = SwiftFormerModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = SwiftFormerForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowerCAmelCase__ = SwiftFormerForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = self.prepare_config_and_inputs()
lowerCAmelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase__ : Tuple = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Dict = False
lowercase__ : List[Any] = False
lowercase__ : List[str] = False
lowercase__ : Tuple = False
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = SwiftFormerModelTester(self )
lowerCAmelCase__ = ConfigTester(
self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> int:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = SwiftFormerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = 8
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCamelCase_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
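            # For the default tester config (image_size=224, embed_dims=[48, 56, 112, 220],
            # eight hidden states) the loop above checks maps of 56x56@48, 56x56@48,
            # 28x28@56, 28x28@56, 14x14@112, 14x14@112, 7x7@220 and 7x7@220.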
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
def _config_zero_init(lowerCamelCase_ ):
lowerCAmelCase__ = copy.deepcopy(lowerCamelCase_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCamelCase_ , lowerCamelCase_ , 1e-10 )
if isinstance(getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ ):
lowerCAmelCase__ = _config_zero_init(getattr(lowerCamelCase_ , lowerCamelCase_ ) )
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return configs_no_init
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def _snake_case ( ) -> List[Any]:
lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) ) | 90 |
'''simple docstring'''
def greatest_common_divisor ( x , y ) -> int:
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm ( x , y ) -> int:
    return (x * y) // greatest_common_divisor(x , y )
def solution ( n = 20 ) -> int:
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
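# Quick sanity checks: greatest_common_divisor(12, 8) == 4, lcm(4, 6) == 12, and
# solution(10) == 2520, the smallest number evenly divisible by 1 through 10
# (the classic Project Euler #5 example).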
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=0 ) -> Tuple:
lowerCAmelCase__ = np.random.RandomState(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = pipe(**lowerCamelCase_ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase__ = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * [inputs['''prompt''']]
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
        image_slice_a = output.images[0, -3:, -3:, -1]
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * [inputs.pop('''prompt''' )]
lowerCAmelCase__ = pipe.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors='''np''' , )
lowerCAmelCase__ = text_inputs['''input_ids''']
        lowerCAmelCase__ = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
lowerCAmelCase__ = prompt_embeds
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
        image_slice_b = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
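        # i.e. manually tokenizing + encoding the prompt and passing the embeddings in
        # must reproduce the plain text-prompt generation to within 1e-4.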
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * ['''this is a negative prompt''']
lowerCAmelCase__ = negative_prompt
lowerCAmelCase__ = 3 * [inputs['''prompt''']]
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
        image_slice_a = output.images[0, -3:, -3:, -1]
lowerCAmelCase__ = self.get_dummy_inputs()
lowerCAmelCase__ = 3 * [inputs.pop('''prompt''' )]
lowerCAmelCase__ = []
for p in [prompt, negative_prompt]:
lowerCAmelCase__ = pipe.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors='''np''' , )
lowerCAmelCase__ = text_inputs['''input_ids''']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
lowerCAmelCase__ , lowerCAmelCase__ = embeds
# forward
lowerCAmelCase__ = pipe(**lowerCamelCase_ )
        image_slice_b = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = False
return options
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# using the PNDM scheduler by default
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
lowerCAmelCase__ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''open neural network exchange'''
lowerCAmelCase__ = np.random.RandomState(0 )
lowerCAmelCase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''open neural network exchange'''
lowerCAmelCase__ = np.random.RandomState(0 )
lowerCAmelCase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = 0
def test_callback_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase__ = latents[0, -3:, -3:, -1]
lowerCAmelCase__ = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase__ = latents[0, -3:, -3:, -1]
lowerCAmelCase__ = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
lowerCAmelCase__ = False
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''Andromeda galaxy in a bottle'''
lowerCAmelCase__ = np.random.RandomState(0 )
pipe(
prompt=lowerCamelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert pipe.safety_checker is None
lowerCAmelCase__ = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase__ = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None | 90 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field ( prompt , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(prompt )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( prompt , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(prompt , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    value = int(value )
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _convert_distributed_mode ( value ):
    value = int(value )
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _convert_dynamo_backend ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    value = int(value )
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _convert_sagemaker_distributed_mode ( value ):
    value = int(value )
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _convert_yes_no_to_bool ( value ):
    return {"yes": True, "no": False}[value.lower()]
class a__ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
    def _format_usage( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace('''<command> [<args>] ''' , '''''' )
return usage | 90 | 1 |
'''simple docstring'''
def fibonacci ( n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index ( n ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution ( n = 1000 ) -> int:
    return fibonacci_digits_index(n )
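# Example: F(12) = 144 is the first Fibonacci term with three digits, so
# fibonacci_digits_index(3) == 12; for n = 1000 this yields the well-known
# Project Euler #25 answer, solution(1000) == 4782.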
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowerCAmelCase__ = nn.Conv2d(lowerCamelCase_ , lowerCamelCase_ , 1 )
lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
        lowerCAmelCase__ = nn.Conv2d(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(lowerCamelCase_ )
lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
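    # Note: the forward pass below simply chains the two methods above:
    # sample -> encode().latents -> decode().sample, i.e. a full VQ round trip
    # through the codebook (so the output shape mirrors the input shape).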
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents
lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ ) | 90 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class a__ :
'''simple docstring'''
def __init__( self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution ( self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
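# With the default signals above, circular_convolution() returns
# [10.0, 10.0, 6.0, 14.0] -- the 4-point circular convolution of
# [2, 1, 2, -1] with [1, 2, 3, 4].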
if __name__ == "__main__":
doctest.testmod() | 90 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe ( grid , row , column , n ) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
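# For example, with `initial_grid` above, is_safe(initial_grid, 0, 1, 1) is True,
# while is_safe(initial_grid, 0, 1, 7) is False because a 7 already sits in the
# top-left 3x3 box.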
def find_empty_location ( grid ) -> tuple[int, int] | None:
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku ( grid ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution ( grid ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
    def to_dict( self ) -> Dict:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
return d | 90 |
'''simple docstring'''
def fibonacci ( n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index ( n ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution ( n = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__UpperCAmelCase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Dict = "ernie_m"
lowercase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size = 25_00_02 , hidden_size = 7_68 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 30_72 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_14 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout | 90 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot ( lst ) -> int:
    return choice(lst )
def kth_number ( lst , k ) -> int:
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(big ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(big ) < k - 1:
        return kth_number(small , k - len(big ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(big , k )
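# Note: because `small` and `big` both exclude elements equal to the pivot, this
# selects the k-th *largest* of distinct values; e.g. kth_number([2, 1, 3], 2)
# returns 2 no matter which pivots the randomness picks.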
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 1 |
'''simple docstring'''
def add ( first , second ) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
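# Worked example: add(5, 9) -> carry 0b0001, partial sum 0b1100; the shifted carry
# 0b0010 is absorbed on the next pass, giving 0b1110 == 14 with no '+' operator.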
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('''Enter the first number: ''').strip())
    second = int(input('''Enter the second number: ''').strip())
print(f"""{add(first, second) = }""") | 90 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=True , slots=True )
class _Item ( Generic[KEY, VAL] ):
    '''simple docstring'''
    key : KEY
    val : VAL
class _DeletedItem ( _Item ):
    '''simple docstring'''
    def __init__( self ) -> None:
        super().__init__(None , None )
    def __bool__( self ) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
    def __init__( self , initial_block_size = 8 , capacity_factor = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key ) -> int:
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind ) -> int:
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind , key , val ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key , val ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key , val ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ) -> int:
        return self._len
    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
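    # Quick usage sketch: hm = HashMap(); hm["a"] = 1 gives hm["a"] == 1 and len(hm) == 1;
    # `del hm["a"]` leaves a `_deleted` tombstone so later probes keep walking, and the
    # bucket array doubles once len(hm) reaches int(0.75 * len(self._buckets)).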
    def __repr__( self ) -> str:
        val_string = ''' ,'''.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})""" | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search ( possible_board , diagonal_right_collisions , diagonal_left_collisions , boards , n , ) -> None:
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
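# Worked check of the diagonal bookkeeping above: the n = 4 board [1, 3, 0, 2]
# places queens at (0, 1), (1, 3), (2, 0), (3, 2); their row - col values
# (-1, -2, 2, 1) and row + col values (1, 4, 2, 5) are pairwise distinct, so no
# two queens share a diagonal, and the columns themselves are all different.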
def n_queens_solution ( n ) -> None:
    boards = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
            print(column )
print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 90 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original ( checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '''''' )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '''''' )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , scheduler )
    pipeline.save_pretrained(output_path )
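# The split above relies on LDM checkpoints storing everything in a single state_dict:
# the VQ-VAE lives under the "first_stage_model." prefix and the diffusion UNet under
# "model.diffusion_model.", so stripping the prefixes yields per-module state_dicts.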
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_ ( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk ( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
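# Usage sketch (paths are placeholders): point `fairseq_path` at a fairseq model.pt;
# with --finetuned the LM head is tied to the shared embeddings via
# make_linear_from_emb, and with --mbart_50 plus --finetuned the activation
# function is switched to relu, matching the mBART-50 fine-tuned checkpoints.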
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is a mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path) | 90 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
    def to_dict( self ) -> Dict:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
return d | 90 | 1 |
'''simple docstring'''
speed_chart = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
speed_chart_inverse = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def convert_speed ( speed , unit_from , unit_to ) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
            F"""Valid values are: {", ".join(speed_chart_inverse )}"""
        )
        raise ValueError(msg )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
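# Sanity checks against the tables above: convert_speed(100, "km/h", "m/s")
# returns 27.778, and convert_speed(1, "knot", "km/h") returns 1.852.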
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
            lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 90 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
__UpperCAmelCase = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
__UpperCAmelCase = '''>>zh<<'''
__UpperCAmelCase = '''Helsinki-NLP/'''
if is_torch_available():
__UpperCAmelCase = '''pt'''
elif is_tf_available():
__UpperCAmelCase = '''tf'''
else:
__UpperCAmelCase = '''jax'''
@require_sentencepiece
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = MarianTokenizer
lowercase__ : Dict = False
lowercase__ : Dict = True
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
super().setUp()
lowerCAmelCase__ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowerCAmelCase__ = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
lowerCAmelCase__ = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = '''</s>'''
lowerCAmelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(lowerCamelCase_ ) , 9 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
lowerCAmelCase__ = en_de_tokenizer(['''I am a small frog'''] , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowerCamelCase_ , batch.input_ids[0] )
lowerCAmelCase__ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = [x.name for x in Path(lowerCamelCase_ ).glob('''*''' )]
self.assertIn('''source.spm''' , lowerCamelCase_ )
MarianTokenizer.from_pretrained(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
        tok = self.get_tokenizer()
        batch = tok(
            ['''I am a small frog''' * 10_00, '''I am a small frog'''] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        tok = self.get_tokenizer()
        batch_smaller = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        tokenizer = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
        source_text = '''Tämä on testi'''
        target_text = '''This is a test'''
        expected_src = [76, 7, 20_47, 2]
        expected_target = [69, 12, 11, 9_40, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(expected_src , src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(expected_target , target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text ) | 90 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital ( n ) -> bool:
    base_str = str(n )
    return len(base_str ) == 9 and set(base_str ) == set('''123456789''' )
def solution ( ) -> int | None:
    # a 4-digit base times 100002 is its concatenated product with (1, 2)
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    # a 3-digit base times 1002003 is its concatenated product with (1, 2, 3)
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
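# Illustrative self-check (not part of the original solution; the helper below
# is hypothetical): for a 4-digit base n, the concatenated product with (1, 2)
# is str(n) + str(2 * n), which equals the 100002 * n used above, and likewise
# 1002003 * n for a 3-digit base with (1, 2, 3).
def _concat_product(base , k ):
    return int(''''''.join(str(base * i ) for i in range(1 , k + 1 ) ))
assert _concat_product(9999 , 2 ) == 100002 * 9999
assert _concat_product(333 , 3 ) == 1002003 * 333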
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main ( ):
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
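# Policy recap (added for clarity): once an issue is at least 30 days old, more
# than 23 idle days trigger the warning comment plus the "stale" label; 7
# further idle days after the bot's warning close it; any non-bot reply reopens
# it and removes the label.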
if __name__ == "__main__":
main() | 90 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compared to taking a full-size model and reducing its layers and emb dimensions
# to the minimum while keeping the full vocab + merges files, which leads to ~3MB in total for all
# files; that latter approach is implemented by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
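# Each merges entry appears to follow the fastBPE codes format consumed by the
# FSMT tokenizer: "<first_subword> <second_subword> <count>"; the trailing empty
# string just terminates the file with a newline. (This format description is
# an inference, not taken from this script.)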
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCAmelCase = tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def get_mobilevit_config ( mobilevit_name ):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1000
        filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key ( name , base_model=False ) -> str:
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCAmelCase__ = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCAmelCase__ = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCAmelCase__ = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCAmelCase__ = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCAmelCase__ = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCAmelCase__ = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCAmelCase__ = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCAmelCase__ = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCAmelCase__ = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCAmelCase__ = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCAmelCase__ = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCAmelCase__ = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCAmelCase__ = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCAmelCase__ = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCAmelCase__ = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCAmelCase__ = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCAmelCase__ = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCAmelCase__ = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCAmelCase__ = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCAmelCase__ = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCAmelCase__ = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCAmelCase__ = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCAmelCase__ = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCAmelCase__ = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCAmelCase__ = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCAmelCase__ = '''mobilevit.''' + name
return name
def convert_state_dict ( orig_state_dict , model , base_model=False ):
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
                orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[prefix + '''query.bias'''] = val[:dim]
                orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
                orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model=base_model )] = val
    return orig_state_dict
def prepare_img ( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint ( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(checkpoint , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='''apple''' )
        model.push_to_hub(model_name , organization='''apple''' )
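# Example invocation (hypothetical script name and paths, shown for illustration):
# python convert_mobilevit_to_pytorch.py --mobilevit_name mobilevit_s \
#     --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./dump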
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__UpperCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 90 |
'''simple docstring'''
import unittest
from typing import Any, Dict, Optional, Tuple, Union
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory ( ) -> Union[str, Any]:
raise RuntimeError('''CUDA out of memory.''' )
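# Background sketch (an assumption about accelerate's documented behaviour, not
# taken from this file): the decorator retries the wrapped function, halving
# batch_size whenever a CUDA OOM error escapes, e.g.
#
# @find_executable_batch_size(starting_batch_size=1_28)
# def train(batch_size):
#     ...          # raises on OOM
# train()          # tries 128, 64, 32, ... until the body succeeds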
class ModelForTest ( nn.Module ):
    '''simple docstring'''
    def __init__( self ) -> int:
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )
    def forward( self , lowerCamelCase_ ) -> Optional[Any]:
        return self.linearb(self.batchnorm(self.lineara(lowerCamelCase_ ) ) )
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
        bs , arga = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase_ ):
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arga , argb ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
mock_training_loop_function(1_28 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase_ ):
raise ValueError('''Oops, we had an error!''' )
        with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory ) | 90 | 1 |
'''simple docstring'''
def hexagonal_numbers ( length ) -> list[int]:
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length )]
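# Quick sanity note (illustrative, not part of the original module):
# hexagonal_numbers(5) == [0, 1, 6, 15, 28], i.e. h(n) = n * (2n - 1) evaluated
# from n = 0, so the classic sequence 1, 6, 15, 28, 45, ... starts at index 1.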
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10)) | 90 |
'''simple docstring'''
import logging
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCAmelCase = logging.getLogger(__name__)
def load_and_quantize_model ( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ) -> Union[str, Any]:
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCAmelCase__ = []
# custom device map
if isinstance(A , A ) and len(device_map.keys() ) > 1:
lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase__ = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
lowerCAmelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
lowerCAmelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase__ = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase__ = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
lowerCAmelCase__ = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase__ = True
lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
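# A minimal usage sketch (hedged: the names follow accelerate's public,
# documented API rather than this file; the model class and checkpoint path are
# hypothetical):
#
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
# with init_empty_weights():
#     empty_model = MyModel()
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# quantized = load_and_quantize_model(
#     empty_model, bnb_config, weights_location="path/to/checkpoint")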
def get_quantized_model_device_map ( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> List[Any]:
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase__ = {}
lowerCAmelCase__ = special_dtypes
lowerCAmelCase__ = no_split_module_classes
lowerCAmelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase__ = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
lowerCAmelCase__ = max_memory
lowerCAmelCase__ = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> Any:
if modules_to_not_convert is None:
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Optional[Any]:
lowerCAmelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase__ = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase__ = '''.'''.join(A )
lowerCAmelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCAmelCase__ = module.weight.data
if module.bias is not None:
lowerCAmelCase__ = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
lowerCAmelCase__ = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
lowerCAmelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert ( model ) -> Tuple:
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase__ = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase__ = sum(A , [] )
lowerCAmelCase__ = len(A ) > 0
# Check if it is a base model
lowerCAmelCase__ = False
if hasattr(A , '''base_model_prefix''' ):
lowerCAmelCase__ = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase__ = list(model.named_children() )
lowerCAmelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase__ = set(A ) - set(A )
lowerCAmelCase__ = list(set(A ) ) + list(A )
# remove ".weight" from the keys
lowerCAmelCase__ = ['''.weight''', '''.bias''']
lowerCAmelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase__ = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
def has_abit_bnb_layers ( model ) -> Optional[int]:
    # check whether any 4-bit linear layer is present in the model
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
return False
def get_parameter_device ( parameter ) -> Union[str, Any]:
return next(parameter.parameters() ).device
def quantize_and_offload_abit ( model , param , param_name , new_dtype , offload_folder , offload_index , fpaa_statistics ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
lowerCAmelCase__ = param_name
lowerCAmelCase__ = model
if "." in tensor_name:
lowerCAmelCase__ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCAmelCase__ = getattr(A , A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
lowerCAmelCase__ = new_module
lowerCAmelCase__ = splits[-1]
# offload weights
lowerCAmelCase__ = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) ) | 90 | 1 |
'''simple docstring'''
import unittest
from typing import Any, Dict, List, Optional
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=2_24 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Any:
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = ViTImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processor
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase__ = image_processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# Initialize image_processor
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase__ = image_processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processor
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase__ = image_processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , ) | 90 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified ( ode_func , ya , xa , step_size , x_end ) -> np.array:
    # number of integration steps
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor: one explicit Euler step
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # corrector: trapezoidal average of the slopes at both ends
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
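# A minimal usage sketch (hypothetical, not part of the original file): solving
# y' = y on [0, 1] with y(0) = 1 should approximate e, since the predictor /
# corrector scheme above is second-order accurate.
def _demo_exponential_growth ( ) -> None:
    approx = euler_modified(lambda x , y : y , 1.0 , 0.0 , 0.1 , 1.0 )[-1]
    print(F"""estimate of e: {approx:.5f} vs np.e: {np.e:.5f}""" )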
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 1 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
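# Usage sketch (hedged: this module follows torch.hub's hubconf convention, and
# the repo identifier below is the one documented for this project, assumed
# here rather than taken from this file):
#
# import torch
# tok = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-uncased')
# mdl = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-uncased')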
@add_start_docstrings(AutoConfig.__doc__ )
def config ( *args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer ( *args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model ( *args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM ( *args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM ( *args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification ( *args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering ( *args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs ) | 90 |
'''simple docstring'''
import numpy as np
from typing import Any, Dict, List, Optional
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a__ :
'''simple docstring'''
    def __init__( self , a=2 , b=3 , length=64 , seed=None ) -> Dict:
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCamelCase_ ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class a__ ( torch.nn.Module ):
'''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ) -> List[Any]:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ) -> Optional[Any]:
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class a__ ( torch.nn.Module ):
'''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ) -> Any:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ) -> Any:
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a + self.b
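# Illustrative smoke test (hypothetical: the identifiers below stand in for the
# dataset and model defined above): the dataset draws x ~ N(0, 1) and sets
# y = a * x + b + noise, so a two-parameter model can recover a and b, e.g.
#
# ds = RegressionDataset(length=96, a=2.5, b=1.0, seed=42)
# batch = next(iter(DataLoader(ds, batch_size=16)))   # {'x': (16,), 'y': (16,)}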
def get_dataloaders ( accelerator , batch_size = 16 ) -> Any:
from datasets import load_dataset
from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
return train_dataloader, eval_dataloader | 90 | 1 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file , sock ):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename='''mytext.txt''' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once() | 90 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check ( pkg , hint=None ):
    require_version(deps[pkg] , hint ) | 90 | 1 |
'''simple docstring'''
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal ( decimal ) -> str:
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ''''''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '''0x''' + hexadecimal
    if negative:
        hexadecimal = '''-''' + hexadecimal
    return hexadecimal
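# Hypothetical spot checks (not part of the original module) against the
# built-in hex(). Note the edge case: decimal_to_hexadecimal(0) returns '0x'
# rather than '0x0', so zero is deliberately left out here.
assert decimal_to_hexadecimal(255 ) == hex(255 )
assert decimal_to_hexadecimal(-256 ) == hex(-256 )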
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys ( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v ( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
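# Shape note (added for clarity): for hidden size H the fused qkv weight is
# (3H, H); rows [0:H], [H:2H] and [2H:3H] are the query, key and value
# projections respectively, which is exactly the slicing performed above.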
def remove_classification_head_ ( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
        lowerCAmelCase__ = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k , None )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
if mlm_model:
lowerCAmelCase__ = torch.Size([1, 11, 30522] )
lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ = torch.Size([1, 3129] )
lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
# verify vqa prediction equals "2"
lowerCAmelCase__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ = torch.Size([1, 2] )
lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 1 |
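The `read_in_q_k_v` helper above slices a fused attention in-projection into separate query, key, and value weights. A minimal, self-contained sketch of that slicing, with a made-up hidden size instead of the ViLT config value:

import torch

hidden_size = 4  # hypothetical; the script reads this from config.hidden_size
# a fused in-projection stacks q, k and v row-wise: shape (3 * hidden, hidden)
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]                   # first third  -> query
k = in_proj_weight[hidden_size : hidden_size * 2, :]  # middle third -> key
v = in_proj_weight[-hidden_size:, :]                  # last third   -> value
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)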
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCAmelCase = logging.getLogger(__name__)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30_522, type=int)
__UpperCAmelCase = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, '''rb''') as fp:
__UpperCAmelCase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__UpperCAmelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
__UpperCAmelCase = [0] * args.vocab_size
for k, v in counter.items():
__UpperCAmelCase = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 90 |
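The dumped counts are typically turned into per-token masking probabilities by downweighting frequent tokens. A hedged sketch of that step (the 0.7 smoothing exponent is an assumption, not something this script defines):

import numpy as np

counts = np.array([0, 5, 100, 2, 0])    # toy per-token counts
probs = np.maximum(counts, 1) ** -0.7   # rarer tokens get higher weight
probs[counts == 0] = 0.0                # never mask tokens that never occur
probs /= probs.sum()                    # normalize into a distribution
print(probs)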
'''simple docstring'''
import re
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(A , A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 1 |
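Because the pattern is anchored with `^...$` and the result is compared against the full input, partial matches are rejected. A quick illustrative check with made-up numbers:

import re

def is_indian_phone(phone: str) -> bool:
    pattern = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    return bool(pattern.fullmatch(phone))

for number in ("+918827897895", "9876543210", "12345"):
    print(number, is_indian_phone(number))  # True, True, False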
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _snake_case ( ) -> Union[str, Any]:
raise RuntimeError('''CUDA out of memory.''' )
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> int:
super().__init__()
lowerCAmelCase__ = nn.Linear(3 , 4 )
lowerCAmelCase__ = nn.BatchNormad(4 )
lowerCAmelCase__ = nn.Linear(4 , 5 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_ ) ) )
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowerCAmelCase__ , lowerCAmelCase__ = mock_training_loop_function('''hello''' )
self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase_ ):
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase_ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function(1_28 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCamelCase_ ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = torch.cuda.memory_allocated()
lowerCAmelCase__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_ )
lowerCAmelCase__ = release_memory(lowerCamelCase_ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_ ) | 90 |
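The behavior these tests exercise is a retry loop that halves the batch size whenever the wrapped function raises an out-of-memory error. A stripped-down sketch of that technique (a simplified stand-in, not accelerate's actual implementation):

import functools

def find_executable_batch_size(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" not in str(e).lower():
                        raise
                    batch_size //= 2  # halve and retry on OOM
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 8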
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__UpperCAmelCase = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 1 |
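The tokenizer above wraps a single sequence as `<cls> seq <eos>` and a pair as `<cls> a <eos> b <eos>`, since the ESM vocabulary has no separate `<sep>` token. A plain-Python sketch of that layout and the matching special-tokens mask (the token ids 0 and 2 are placeholders, not the real vocabulary):

def build_inputs(tokens_a, tokens_b=None, cls_id=0, eos_id=2):
    ids = [cls_id] + tokens_a + [eos_id]
    if tokens_b is not None:
        ids += tokens_b + [eos_id]  # eos doubles as the separator
    return ids

def special_tokens_mask(tokens_a, tokens_b=None):
    mask = [1] + [0] * len(tokens_a) + [1]
    if tokens_b is not None:
        mask += [0] * len(tokens_b) + [1]
    return mask

print(build_inputs([5, 6], [7]))         # [0, 5, 6, 2, 7, 2]
print(special_tokens_mask([5, 6], [7]))  # [1, 0, 0, 1, 0, 1]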
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = KandinskyVaaImgaImgPipeline
lowercase__ : Any = ["image_embeds", "negative_image_embeds", "image"]
lowercase__ : Union[str, Any] = [
"image_embeds",
"negative_image_embeds",
"image",
]
lowercase__ : List[str] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase__ : Union[str, Any] = False
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return 32
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return 32
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return 1_00
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
torch.manual_seed(0 )
lowerCAmelCase__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase__ = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowerCAmelCase__ = DDIMScheduler(**lowerCamelCase_ )
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> Dict:
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**lowerCamelCase_ )
lowerCAmelCase__ = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCAmelCase__ = '''A red cartoon frog, 4k'''
lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCAmelCase__ = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCAmelCase__ = pipeline(
image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ ) | 90 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AltDiffusionPipeline
lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 |
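`_LazyModule` defers the heavy submodule imports until an attribute is first accessed. The same effect can be sketched with PEP 562's module-level `__getattr__` inside a package's `__init__.py` (a minimal stand-in, not the actual `_LazyModule` implementation):

import importlib

_import_structure = {"tokenization_convbert": ["ConvBertTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        # import the submodule only on first access, then pull the attribute off it
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")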
'''simple docstring'''
def _snake_case ( A , A ) -> int:
return x if y == 0 else greatest_common_divisor(A , x % y )
def _snake_case ( A , A ) -> int:
return (x * y) // greatest_common_divisor(A , A )
def _snake_case ( A = 20 ) -> int:
lowerCAmelCase__ = 1
for i in range(1 , n + 1 ):
lowerCAmelCase__ = lcm(A , A )
return g
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = "upernet"
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=5_12 , lowerCamelCase_=0.02 , lowerCamelCase_=[1, 2, 3, 6] , lowerCamelCase_=True , lowerCamelCase_=0.4 , lowerCamelCase_=3_84 , lowerCamelCase_=2_56 , lowerCamelCase_=1 , lowerCamelCase_=False , lowerCamelCase_=2_55 , **lowerCamelCase_ , ) -> Union[str, Any]:
super().__init__(**lowerCamelCase_ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowerCAmelCase__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = backbone_config.get('''model_type''' )
lowerCAmelCase__ = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase__ = config_class.from_dict(lowerCamelCase_ )
lowerCAmelCase__ = backbone_config
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = pool_scales
lowerCAmelCase__ = use_auxiliary_head
lowerCAmelCase__ = auxiliary_loss_weight
lowerCAmelCase__ = auxiliary_in_channels
lowerCAmelCase__ = auxiliary_channels
lowerCAmelCase__ = auxiliary_num_convs
lowerCAmelCase__ = auxiliary_concat_input
lowerCAmelCase__ = loss_ignore_index
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.backbone_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output | 90 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _snake_case ( A , A=None , A=None , A=None ) -> Union[str, Any]:
lowerCAmelCase__ = True
while ask_again:
lowerCAmelCase__ = input(A )
try:
if default is not None and len(A ) == 0:
return default
return convert_value(A ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(A )
def _snake_case ( A , A=[] , A=None , A=0 ) -> List[Any]:
lowerCAmelCase__ = BulletMenu(A , A )
lowerCAmelCase__ = menu.run(default_choice=A )
return convert_value(A ) if convert_value is not None else result
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _snake_case ( A ) -> str:
lowerCAmelCase__ = int(A )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _snake_case ( A ) -> List[str]:
return {"yes": True, "no": False}[value.lower()]
class a__ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = usage.replace('''<command> [<args>] ''' , '''''' )
return usage | 90 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : List[str] = "facebook/bart-large-mnli"
lowercase__ : str = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
lowercase__ : List[str] = "text_classifier"
lowercase__ : List[str] = AutoTokenizer
lowercase__ : int = AutoModelForSequenceClassification
lowercase__ : Optional[Any] = ["text", ["text"]]
lowercase__ : str = ["text"]
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
super().setup()
lowerCAmelCase__ = self.model.config
lowerCAmelCase__ = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
lowerCAmelCase__ = int(lowerCamelCase_ )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = labels
return self.pre_processor(
[text] * len(lowerCamelCase_ ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
lowerCAmelCase__ = outputs.logits
lowerCAmelCase__ = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id] | 90 |
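The tool scores each `(text, "This example is {label}")` pair with an NLI model and keeps the label whose entailment logit is highest. A model-free sketch of that decode step, using made-up logits in place of real model output:

import torch

labels = ["positive", "negative", "neutral"]
# one row per (text, hypothesis) pair; the entailment column index is an
# assumption here -- the tool reads it from the model config at setup time
logits = torch.tensor([[0.1, 0.2, 2.5],
                       [1.9, 0.3, 0.1],
                       [0.2, 1.1, 0.4]])
entailment_id = 2
best = torch.argmax(logits[:, entailment_id]).item()
print(labels[best])  # positive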
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(lowerCamelCase_ )
lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents
lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ ) | 90 | 1 |
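The `VectorQuantizer` inside this model snaps each latent vector to its nearest codebook entry. The core lookup can be sketched in a few lines (toy sizes; the real layer also handles the commitment loss and the straight-through gradient):

import torch

codebook = torch.randn(16, 4)  # 16 code vectors of dimension 4
latents = torch.randn(5, 4)    # 5 latent vectors to quantize
distances = torch.cdist(latents, codebook)  # (5, 16) pairwise L2 distances
indices = distances.argmin(dim=1)           # nearest code per latent
quantized = codebook[indices]               # snapped latents, shape (5, 4)
print(indices.tolist(), quantized.shape)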
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[Any]:
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Union[str, Any]:
lowerCAmelCase__ = {}
if top_k is not None:
lowerCAmelCase__ = top_k
return {}, {}, postprocess_params
def __call__( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Tuple:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[int]:
lowerCAmelCase__ = load_image(lowerCamelCase_ )
lowerCAmelCase__ = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
return model_inputs
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = self.model(**lowerCamelCase_ )
return model_outputs
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=5 ) -> List[Any]:
if top_k > self.model.config.num_labels:
lowerCAmelCase__ = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase__ = model_outputs.logits.softmax(-1 )[0]
lowerCAmelCase__ , lowerCAmelCase__ = probs.topk(lowerCamelCase_ )
elif self.framework == "tf":
lowerCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowerCAmelCase__ = tf.math.top_k(lowerCamelCase_ , k=lowerCamelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowerCAmelCase__ = scores.tolist()
lowerCAmelCase__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase_ , lowerCamelCase_ )] | 90 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _snake_case ( A , A , A , A ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _snake_case ( A ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _snake_case ( A ) -> Matrix | None:
if location := find_empty_location(A ):
lowerCAmelCase__ , lowerCAmelCase__ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(A , A , A , A ):
lowerCAmelCase__ = digit
if sudoku(A ) is not None:
return grid
lowerCAmelCase__ = 0
return None
def _snake_case ( A ) -> None:
for row in grid:
for cell in row:
print(A , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__UpperCAmelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 1 |