"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
def get_masked_lm_array(snake_case__ : str ):
_snake_case : Tuple = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_snake_case : Dict = tf.train.load_variable(snake_case__ , snake_case__ )
if "kernel" in name:
_snake_case : Dict = array.transpose()
return torch.from_numpy(snake_case__ )
def get_encoder_array(snake_case__ : str ):
_snake_case : List[str] = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_snake_case : Tuple = tf.train.load_variable(snake_case__ , snake_case__ )
if "kernel" in name:
_snake_case : str = array.transpose()
return torch.from_numpy(snake_case__ )
def get_encoder_layer_array(snake_case__ : int , snake_case__ : str ):
_snake_case : Optional[Any] = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_snake_case : str = tf.train.load_variable(snake_case__ , snake_case__ )
if "kernel" in name:
_snake_case : List[Any] = array.transpose()
return torch.from_numpy(snake_case__ )
def get_encoder_attention_layer_array(snake_case__ : int , snake_case__ : str , snake_case__ : str ):
_snake_case : Optional[int] = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_snake_case : Tuple = tf.train.load_variable(snake_case__ , snake_case__ )
_snake_case : Optional[int] = array.reshape(snake_case__ )
if "kernel" in name:
_snake_case : Optional[Any] = array.transpose()
return torch.from_numpy(snake_case__ )
print(F"Loading model based on config from {config_path}..." )
_snake_case : Tuple = BertConfig.from_json_file(snake_case__ )
_snake_case : Union[str, Any] = BertForMaskedLM(snake_case__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
_snake_case : BertSelfAttention = layer.attention.self
_snake_case : Any = get_encoder_attention_layer_array(
snake_case__ , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
_snake_case : Dict = get_encoder_attention_layer_array(
snake_case__ , """_query_dense/bias""" , self_attn.query.bias.data.shape )
_snake_case : List[str] = get_encoder_attention_layer_array(
snake_case__ , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
_snake_case : Tuple = get_encoder_attention_layer_array(
snake_case__ , """_key_dense/bias""" , self_attn.key.bias.data.shape )
_snake_case : int = get_encoder_attention_layer_array(
snake_case__ , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
_snake_case : Dict = get_encoder_attention_layer_array(
snake_case__ , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
_snake_case : BertSelfOutput = layer.attention.output
_snake_case : Dict = get_encoder_attention_layer_array(
snake_case__ , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
_snake_case : Union[str, Any] = get_encoder_attention_layer_array(
snake_case__ , """_output_dense/bias""" , self_output.dense.bias.data.shape )
_snake_case : int = get_encoder_layer_array(snake_case__ , """_attention_layer_norm/gamma""" )
_snake_case : Optional[int] = get_encoder_layer_array(snake_case__ , """_attention_layer_norm/beta""" )
# Intermediate
_snake_case : BertIntermediate = layer.intermediate
_snake_case : Dict = get_encoder_layer_array(snake_case__ , """_intermediate_dense/kernel""" )
_snake_case : Any = get_encoder_layer_array(snake_case__ , """_intermediate_dense/bias""" )
# Output
_snake_case : BertOutput = layer.output
_snake_case : int = get_encoder_layer_array(snake_case__ , """_output_dense/kernel""" )
_snake_case : Tuple = get_encoder_layer_array(snake_case__ , """_output_dense/bias""" )
_snake_case : Optional[Any] = get_encoder_layer_array(snake_case__ , """_output_layer_norm/gamma""" )
_snake_case : str = get_encoder_layer_array(snake_case__ , """_output_layer_norm/beta""" )
# Embeddings
_snake_case : int = get_encoder_array("""_position_embedding_layer/embeddings""" )
_snake_case : int = get_encoder_array("""_type_embedding_layer/embeddings""" )
_snake_case : Optional[int] = get_encoder_array("""_embedding_norm_layer/gamma""" )
_snake_case : str = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
_snake_case : Union[str, Any] = model.cls.predictions.transform
_snake_case : Union[str, Any] = get_masked_lm_array("""dense/kernel""" )
_snake_case : int = get_masked_lm_array("""dense/bias""" )
_snake_case : str = get_masked_lm_array("""layer_norm/gamma""" )
_snake_case : str = get_masked_lm_array("""layer_norm/beta""" )
_snake_case : Optional[Any] = get_masked_lm_array("""embedding_table""" )
# Pooling
_snake_case : Union[str, Any] = BertPooler(config=snake_case__ )
_snake_case : BertPooler = get_encoder_array("""_pooler_layer/kernel""" )
_snake_case : BertPooler = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(snake_case__ )
# Integration test - should load without any errors ;)
_snake_case : str = BertForMaskedLM.from_pretrained(snake_case__ )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
A_ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
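# Example invocation, assuming this script is saved as `convert_checkpoint.py`
# (the checkpoint and config paths below are hypothetical placeholders):
#
#   python convert_checkpoint.py \
#       --tf_checkpoint_path ./token_dropping_bert/ckpt \
#       --bert_config_file ./token_dropping_bert/bert_config.json \
#       --pytorch_dump_path ./bert-token-dropping-pytorch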
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
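# Worked example of how MAPPING is applied below (derived from the logic in
# `recursively_load_weights`, not an extra lookup table): for a pretrained
# (not fine-tuned) model, a fairseq key such as "encoder.layers.3.fc1.weight"
# matches the "fc1" entry, the layer index "3" is substituted for "*", and the
# value lands at "encoder.layers.3.feed_forward.intermediate_dense" with
# weight_type "weight".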
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
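# Example invocation for a fine-tuned checkpoint, assuming this script is saved
# as `convert_hubert_checkpoint.py` (all paths are hypothetical placeholders):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_large_ft.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./hubert-large-converted
#
# For a pretrained-only checkpoint, pass `--not_finetuned` and omit `--dict_path`.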
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Optional[int] = """encodec"""
def __init__( self : List[str] , __UpperCAmelCase : Dict=[1.5, 3.0, 6.0, 12.0, 24.0] , __UpperCAmelCase : Optional[int]=24000 , __UpperCAmelCase : Dict=1 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[int]=128 , __UpperCAmelCase : Optional[Any]=32 , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : Optional[Any]=[8, 5, 4, 2] , __UpperCAmelCase : Tuple="weight_norm" , __UpperCAmelCase : str=7 , __UpperCAmelCase : List[Any]=7 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : int=2 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Any="reflect" , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : str=1.0 , __UpperCAmelCase : List[str]=1024 , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Tuple=True , **__UpperCAmelCase : Optional[Any] , ):
a : int = target_bandwidths
a : Optional[Any] = sampling_rate
a : Dict = audio_channels
a : Optional[int] = normalize
a : Tuple = chunk_length_s
a : Dict = overlap
a : Dict = hidden_size
a : int = num_filters
a : Optional[Any] = num_residual_layers
a : Any = upsampling_ratios
a : str = norm_type
a : Dict = kernel_size
a : Any = last_kernel_size
a : Any = residual_kernel_size
a : List[str] = dilation_growth_rate
a : Union[str, Any] = use_causal_conv
a : int = pad_mode
a : int = compress
a : Any = num_lstm_layers
a : Optional[int] = trim_right_ratio
a : List[str] = codebook_size
a : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
a : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
super().__init__(**__UpperCAmelCase)
@property
def __snake_case ( self : Union[str, Any]):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def __snake_case ( self : Union[str, Any]):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def __snake_case ( self : Optional[Any]):
a : Union[str, Any] = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def __snake_case ( self : str):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
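# A minimal sketch (not part of the original file) of how the derived properties
# interact for the default 24 kHz configuration: the default upsampling_ratios
# [8, 5, 4, 2] give a hop length of 8 * 5 * 4 * 2 = 320 samples, so
# frame_rate = ceil(24000 / 320) = 75 frames per second.
#
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.5)
#   config.chunk_length   # 24000 samples (1.0 s at 24 kHz)
#   config.chunk_stride   # 12000 samples ((1.0 - 0.5) * chunk_length)
#   config.frame_rate     # 75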
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Any = (IPNDMScheduler,)
UpperCAmelCase : Optional[int] = (("""num_inference_steps""", 5_0),)
def __snake_case ( self : Dict , **__UpperCAmelCase : Optional[Any]):
a : str = {"num_train_timesteps": 1000}
config.update(**__UpperCAmelCase)
return config
def __snake_case ( self : int , __UpperCAmelCase : Optional[Any]=0 , **__UpperCAmelCase : Union[str, Any]):
a : List[Any] = dict(self.forward_default_kwargs)
a : int = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a : int = self.dummy_sample
a : str = 0.1 * sample
a : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a : List[Any] = self.get_scheduler_config(**__UpperCAmelCase)
a : List[str] = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a : List[Any] = dummy_past_residuals[:]
if time_step is None:
a : Any = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a : List[Any] = scheduler_class.from_pretrained(__UpperCAmelCase)
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a : Optional[Any] = dummy_past_residuals[:]
a : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a : Dict = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a : Optional[int] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a : str = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __snake_case ( self : int):
pass
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str=0 , **__UpperCAmelCase : List[Any]):
a : List[str] = dict(self.forward_default_kwargs)
a : Any = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a : Tuple = self.dummy_sample
a : str = 0.1 * sample
a : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a : Optional[int] = self.get_scheduler_config()
a : List[str] = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals (must be after setting timesteps)
a : Optional[int] = dummy_past_residuals[:]
if time_step is None:
a : Any = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a : List[Any] = scheduler_class.from_pretrained(__UpperCAmelCase)
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residual (must be after setting timesteps)
a : str = dummy_past_residuals[:]
a : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a : Tuple = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a : Dict = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a : List[str] = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __snake_case ( self : str , **__UpperCAmelCase : Dict):
a : Tuple = self.scheduler_classes[0]
a : Optional[Any] = self.get_scheduler_config(**__UpperCAmelCase)
a : Any = scheduler_class(**__UpperCAmelCase)
a : int = 10
a : Union[str, Any] = self.dummy_model()
a : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase)
a : Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
for i, t in enumerate(scheduler.timesteps):
a : Tuple = model(__UpperCAmelCase , __UpperCAmelCase)
a : Union[str, Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
return sample
def __snake_case ( self : Optional[Any]):
a : List[Any] = dict(self.forward_default_kwargs)
a : List[str] = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
for scheduler_class in self.scheduler_classes:
a : Tuple = self.get_scheduler_config()
a : Any = scheduler_class(**__UpperCAmelCase)
a : Dict = self.dummy_sample
a : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps"):
scheduler.set_timesteps(__UpperCAmelCase)
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps"):
a : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a : Dict = dummy_past_residuals[:]
a : Optional[int] = scheduler.timesteps[5]
a : List[Any] = scheduler.timesteps[6]
a : Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a : Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a : Dict = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __snake_case ( self : Tuple):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase , time_step=__UpperCAmelCase)
def __snake_case ( self : int):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=__UpperCAmelCase)
def __snake_case ( self : Optional[Any]):
a : Optional[int] = self.full_loop()
a : List[str] = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 2540529) < 10
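# A minimal usage sketch for the scheduler under test, mirroring how `full_loop`
# above drives it (`model` and `sample` stand in for a denoising model and an
# initial sample; this is illustrative, not part of the test suite):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample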
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
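# This integration test is gated behind the `@slow` decorator; in the
# transformers test suite such tests only run when the RUN_SLOW environment
# variable is set, e.g. (the test file path shown here is an assumption):
#
#   RUN_SLOW=1 pytest tests/models/camembert/test_modeling_tf_camembert.py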
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
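    # Illustrative sketch of what `get_offset_mapping` returns (a hypothetical
    # tokenization, shown only to explain the (start, end) character spans):
    # for the text "Hello world" and tokens ["▁Hello", "▁world"], the result
    # would be [(0, 5), (6, 11)] -- each tuple indexes into the original string,
    # with the skipped whitespace accounted for via `char_mapping`.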
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1: Optional[List[int]] = None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
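# A minimal usage sketch (file paths are hypothetical placeholders for a local
# sentencepiece model and vocab produced for ERNIE-M):
#
#   tokenizer = ErnieMTokenizer(
#       sentencepiece_model_ckpt="./sentencepiece.bpe.model",
#       vocab_file="./vocab.txt",
#   )
#   tokens = tokenizer.tokenize("Hello world")
#   ids = tokenizer.convert_tokens_to_ids(tokens)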
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
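# Worked example: for x = 1/2, y = 1/3, z = 1/6,
# top    = 1*3*6 + 1*2*6 + 1*2*3 = 36
# bottom = 2*3*6 = 36
# so add_three(1, 2, 1, 3, 1, 6) reduces 36/36 by gcd 36 and returns (1, 1).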
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
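# Sanity checks: solution(n) is the central binomial coefficient C(2n, n), the
# number of lattice paths through an n x n grid. solution(1) == 2 (C(2, 1)) and
# solution(2) == 6 (C(4, 2)).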
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
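    # A minimal sketch of how the training test below uses this wrapper
    # (illustrative only; `linear` is a stand-in module):
    #
    #   linear = nn.Linear(16, 16)
    #   wrapped = LoRALayer(linear, rank=4)
    #   out = wrapped(torch.randn(1, 16))  # frozen base output + adapter output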
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCamelCase__ :
lowerCAmelCase = 42
# setable values
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = None
@classmethod
def __a ( cls : Any , _lowercase : CommonSchedulerState , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray ):
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = 42
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase = 42
@property
def __a ( self : Dict ):
return True
@register_to_config
def __init__( self : Any , num_train_timesteps : int = 1000 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ):
self.dtype = dtype
def __a ( self : Union[str, Any] , _lowercase : Optional[CommonSchedulerState] = None ):
if common is None:
A = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
A = jnp.array(1.0 , dtype=self.dtype )
A = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def __a ( self : int , _lowercase : DDPMSchedulerState , _lowercase : jnp.ndarray , _lowercase : Optional[int] = None ):
return sample
def __a ( self : Any , _lowercase : DDPMSchedulerState , _lowercase : int , _lowercase : Tuple = () ):
A = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
A = (jnp.arange(0 , _lowercase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
def __a ( self : str , _lowercase : DDPMSchedulerState , _lowercase : List[Any] , _lowercase : Union[str, Any]=None , _lowercase : int=None ):
A = state.common.alphas_cumprod[t]
A = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
A = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
A = jnp.clip(_lowercase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
A = jnp.log(jnp.clip(_lowercase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
A = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
A = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
A = variance
A = state.common.betas[t]
A = (predicted_variance + 1) / 2
A = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : str , _lowercase : DDPMSchedulerState , _lowercase : jnp.ndarray , _lowercase : int , _lowercase : jnp.ndarray , _lowercase : Optional[jax.random.KeyArray] = None , _lowercase : bool = True , ):
A = timestep
if key is None:
A = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
A , A = jnp.split(_lowercase , sample.shape[1] , axis=1 )
else:
A = None
# 1. compute alphas, betas
A = state.common.alphas_cumprod[t]
A = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A = model_output
elif self.config.prediction_type == "v_prediction":
A = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
' or `v_prediction` for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A = jnp.clip(_lowercase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
A = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
A = jax.random.split(_lowercase , num=1 )
A = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase ) ** 0.5) * noise
A = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase )
def __a ( self : List[Any] , _lowercase : DDPMSchedulerState , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray , ):
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase )
def __a ( self : List[Any] , _lowercase : DDPMSchedulerState , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray , ):
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase )
def __len__( self : int ):
return self.config.num_train_timesteps
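# A standalone NumPy sketch of the update `step` implements above: predict x_0
# from the noise prediction (formula (15) of https://arxiv.org/pdf/2006.11239.pdf),
# then form the posterior mean with the coefficients of formula (7) and add the
# "fixed_small" variance. The linear beta schedule and all names here are
# illustrative, not the library's API.
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

def ddpm_step(model_output, sample, t, seed=0):
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    beta_prod_t = 1.0 - alpha_prod_t
    beta_prod_t_prev = 1.0 - alpha_prod_t_prev
    # "predicted x_0", formula (15)
    pred_original = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    # posterior mean coefficients, formula (7)
    coeff_orig = (alpha_prod_t_prev**0.5 * betas[t]) / beta_prod_t
    coeff_curr = alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    pred_prev = coeff_orig * pred_original + coeff_curr * sample
    # "fixed_small" posterior variance, formula (7)
    variance = beta_prod_t_prev / beta_prod_t * betas[t]
    noise = np.random.default_rng(seed).standard_normal(sample.shape) if t > 0 else 0.0
    return pred_prev + variance**0.5 * noise

print(ddpm_step(np.ones(4), np.zeros(4), t=10).shape)  # (4,)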
| 91 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig( PretrainedConfig ):
    model_type = "lilt"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1024 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
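# A minimal usage sketch of the config above; the defaults mirror
# `transformers.LiltConfig` for SCUT-DLVCLab/lilt-roberta-en-base.
config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
print(config.hidden_size, config.max_2d_position_embeddings)  # 768 1024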
| 91 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class _UpperCamelCase( _UpperCAmelCase ):
def __init__( self : Optional[int] ):
'''simple docstring'''
self.test()
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : str = 0
__a : Tuple = False
while not completed:
if counter == 1:
self.reset()
__a : int = self.advance()
if not self.does_advance(_UpperCAmelCase ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
__a : str = self.update(_UpperCAmelCase )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase( _UpperCAmelCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
super(_UpperCAmelCase , self ).__init__()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__a : Dict = token_ids
__a : Optional[int] = len(self.token_ids )
__a : List[str] = -1 # the index of the currently fulfilled step
__a : List[Any] = False
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
__a : Optional[Any] = False
__a : List[str] = False
__a : Tuple = False
if self.does_advance(_UpperCAmelCase ):
self.fulfilled_idx += 1
__a : Tuple = True
if self.fulfilled_idx == (self.seqlen - 1):
__a : Any = True
__a : Dict = completed
else:
# failed to make progress.
__a : Union[str, Any] = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : Optional[Any] = False
__a : Union[str, Any] = 0
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ):
'''simple docstring'''
__a : Union[str, Any] = PhrasalConstraint(self.token_ids )
if stateful:
__a : List[Any] = self.seqlen
__a : Optional[Any] = self.fulfilled_idx
__a : List[Any] = self.completed
return new_constraint
class _UpperCamelCase:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict=True ):
'''simple docstring'''
__a : Optional[Any] = max([len(_UpperCAmelCase ) for one in nested_token_ids] )
__a : Union[str, Any] = {}
for token_ids in nested_token_ids:
__a : Tuple = root
for tidx, token_id in enumerate(_UpperCAmelCase ):
if token_id not in level:
__a : List[str] = {}
__a : List[str] = level[token_id]
if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
f''' {nested_token_ids}.''' )
__a : List[Any] = root
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Tuple = self.trie
for current_token in current_seq:
__a : Any = start[current_token]
__a : int = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : List[Any] = self.next_tokens(_UpperCAmelCase )
return len(_UpperCAmelCase ) == 0
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : List[str] = list(root.values() )
if len(_UpperCAmelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : List[str] = self.count_leaves(_UpperCAmelCase )
return len(_UpperCAmelCase ) != leaf_count
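# A plain-dict sketch of the structure the class above builds: each token id
# maps to the sub-trie of its continuations, so next-token lookup is a chain of
# dict hops. The token ids are illustrative.
def build_trie(nested_token_ids):
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root

trie = build_trie([[5, 8, 3], [5, 9]])
print(sorted(trie[5]))  # [8, 9] -> both continuations after token 5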
class _UpperCamelCase( _UpperCAmelCase ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
super(_UpperCAmelCase , self ).__init__()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__a : Optional[Any] = DisjunctiveTrie(_UpperCAmelCase )
__a : Union[str, Any] = nested_token_ids
__a : str = self.trie.max_height
__a : Union[str, Any] = []
__a : Any = False
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : List[str] = self.trie.next_tokens(self.current_seq )
if len(_UpperCAmelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
__a : Any = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' )
__a : List[Any] = False
__a : List[str] = False
__a : Optional[Any] = False
if self.does_advance(_UpperCAmelCase ):
self.current_seq.append(_UpperCAmelCase )
__a : List[Any] = True
else:
__a : Dict = True
self.reset()
__a : Dict = self.trie.reached_leaf(self.current_seq )
__a : Dict = completed
return stepped, completed, reset
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Any = False
__a : List[str] = []
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[str]=False ):
'''simple docstring'''
__a : str = DisjunctiveConstraint(self.token_ids )
if stateful:
__a : Tuple = self.seqlen
__a : Union[str, Any] = self.current_seq
__a : Optional[int] = self.completed
return new_constraint
class _UpperCamelCase:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : Optional[int] = constraints
# max # of steps required to fulfill a given constraint
__a : Tuple = max([c.seqlen for c in constraints] )
__a : int = len(_UpperCAmelCase )
__a : Tuple = False
self.init_state()
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : Dict = []
__a : List[str] = None
__a : List[Any] = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : Union[str, Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : List[str] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__a : Optional[int] = constraint.advance()
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.append(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.extend(_UpperCAmelCase )
else:
__a : List[Any] = self.inprogress_constraint.advance()
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.append(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
token_list.extend(_UpperCAmelCase )
if len(_UpperCAmelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__a : Tuple = self.add(_UpperCAmelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
__a : Optional[Any] = False, False
if self.completed:
__a : str = True
__a : Any = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__a : Union[str, Any] = self.inprogress_constraint.update(_UpperCAmelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) )
__a : Union[str, Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__a : Tuple = None
if len(self.pending_constraints ) == 0:
# we're done!
__a : List[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_UpperCAmelCase ):
__a : Tuple = pending_constraint.update(_UpperCAmelCase )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(_UpperCAmelCase )
__a : List[Any] = None
if not complete and stepped:
__a : Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__a : int = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__a : Any = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any=True ):
'''simple docstring'''
__a : Tuple = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
__a : Tuple = [
constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__a : Optional[Any] = self.inprogress_constraint.copy(stateful=_UpperCAmelCase )
__a : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
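# A stripped-down sketch of the bookkeeping `update`/`add` perform above for a
# single phrasal constraint: advance an index while generated tokens match the
# target phrase, and reset on a mismatch. Purely illustrative; the real classes
# also juggle multiple pending constraints and disjunctive branches.
def track_phrase(generated, phrase):
    fulfilled = -1
    for token in generated:
        if token == phrase[fulfilled + 1]:
            fulfilled += 1
            if fulfilled == len(phrase) - 1:
                return "completed"
        else:
            fulfilled = -1  # mismatch: reset, like Constraint.reset()
    return f"{len(phrase) - (fulfilled + 1)} tokens remaining"

print(track_phrase([7, 1, 2, 9], phrase=[1, 2, 3]))  # 3 tokens remaining (reset by the 9)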
| 47 | """simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
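# Quick demonstrations of the evaluator above: "2 1 + 3 *" is (2 + 1) * 3, and
# division truncates toward zero via the floor-division correction.
print(evaluate(["2", "1", "+", "3", "*"]))  # 9
print(evaluate(["15", "7", "1", "1", "+", "-", "/", "3", "*"]))  # 15 / (7 - 2) * 3 = 9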
if __name__ == "__main__":
import doctest
doctest.testmod()
| 586 | 0 |
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 716 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = ShapEPipeline
UpperCamelCase : str = ['''prompt''']
UpperCamelCase : Tuple = ['''prompt''']
UpperCamelCase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : int = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 8
@property
def UpperCAmelCase_ ( self ):
__A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__A : Optional[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__A : List[Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_text_encoder
__A : List[Any] = self.dummy_tokenizer
__A : str = self.dummy_renderer
__A : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__A : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : List[Any] = torch.manual_seed(_A )
else:
__A : Dict = torch.Generator(device=_A ).manual_seed(_A )
__A : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Tuple = 'cpu'
__A : Any = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = pipe(**self.get_dummy_inputs(_A ) )
__A : int = output.images[0]
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : Any = np.array(
    [0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216,
     0.00039216, 0.00039216, 0.00039216, 0.00039216] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self ):
__A : List[str] = torch_device == 'cpu'
__A : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase_ ( self ):
__A : Any = self.get_dummy_components()
__A : Any = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Any = 1
__A : Dict = 2
__A : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[int] = batch_size * [inputs[key]]
__A : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__A : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
__A : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : str = torch.Generator(device=_A ).manual_seed(0 )
__A : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 77 | 0 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
A_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
A_ = []
A_ = []
A_ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
A_ = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"emoji": True,
},
}
]
A_ = 0
for log in Path().glob("*.log"):
A_ = 0
with open(log, "r") as f:
for line in f:
A_ = json.loads(line)
if line.get("nodeid", "") != "":
A_ = line["nodeid"]
if line.get("duration", None) is not None:
A_ = f'''{line["duration"]:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
A_ = []
log.unlink()
A_ = ""
A_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
A_ = []
A_ = {}
for test in failed_tests:
A_ = test[0].split("::")
A_ = data[0].split("/")[-1]
if data[0] not in filesafailed:
A_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
A_ = [test[0] for test in failed_table]
A_ = list(set(files))
# Count number of instances in failed_tests
A_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
A_ = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
A_ = "Too many failed tests, please see the full report in the Action results."
A_ = len(err) + 10
A_ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
A_ = "No failed tests! 🤗"
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
A_ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
A_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
A_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
A_ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
A_ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
A_ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
A_ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
A_ = row[0]
else:
A_ = ""
A_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 391 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = "encodec"
def __init__( self: List[str] , UpperCamelCase_: List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCamelCase_: Optional[Any]=2_4000 , UpperCamelCase_: Dict=1 , UpperCamelCase_: List[Any]=False , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , UpperCamelCase_: Dict=128 , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=1 , UpperCamelCase_: Union[str, Any]=[8, 5, 4, 2] , UpperCamelCase_: List[Any]="weight_norm" , UpperCamelCase_: int=7 , UpperCamelCase_: Optional[int]=7 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Any="reflect" , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Any=1.0 , UpperCamelCase_: Any=1024 , UpperCamelCase_: int=None , UpperCamelCase_: Any=True , **UpperCamelCase_: Optional[Any] , ):
UpperCamelCase_ =target_bandwidths
UpperCamelCase_ =sampling_rate
UpperCamelCase_ =audio_channels
UpperCamelCase_ =normalize
UpperCamelCase_ =chunk_length_s
UpperCamelCase_ =overlap
UpperCamelCase_ =hidden_size
UpperCamelCase_ =num_filters
UpperCamelCase_ =num_residual_layers
UpperCamelCase_ =upsampling_ratios
UpperCamelCase_ =norm_type
UpperCamelCase_ =kernel_size
UpperCamelCase_ =last_kernel_size
UpperCamelCase_ =residual_kernel_size
UpperCamelCase_ =dilation_growth_rate
UpperCamelCase_ =use_causal_conv
UpperCamelCase_ =pad_mode
UpperCamelCase_ =compress
UpperCamelCase_ =num_lstm_layers
UpperCamelCase_ =trim_right_ratio
UpperCamelCase_ =codebook_size
UpperCamelCase_ =codebook_dim if codebook_dim is not None else hidden_size
UpperCamelCase_ =use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**UpperCamelCase_ )
@property
def UpperCamelCase__ ( self: int ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase__ ( self: List[Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase__ ( self: List[str] ):
UpperCamelCase_ =np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase__ ( self: Optional[Any] ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
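# A worked example of the two properties above for the 24 kHz defaults: the hop
# length is the product of the upsampling ratios and the frame rate divides the
# sampling rate by that hop (values taken from this config's defaults; math and
# numpy are imported at the top of this file).
_example_ratios = [8, 5, 4, 2]
_example_hop = int(np.prod(_example_ratios))  # 320 samples per frame
print(_example_hop, math.ceil(24_000 / _example_hop))  # 320 75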
| 391 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
snake_case__ : List[Any] = """EncodecFeatureExtractor"""
snake_case__ : int = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
a_ : Union[str, Any] = self.feature_extractor
a_ : Dict = False
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Any=True ) -> str:
return self.tokenizer.get_decoder_prompt_ids(task=UpperCAmelCase_ , language=UpperCAmelCase_ , no_timestamps=UpperCAmelCase_ )
def __call__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : str ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ )
a_ : Dict = kwargs.pop('audio' , UpperCAmelCase_ )
a_ : Any = kwargs.pop('sampling_rate' , UpperCAmelCase_ )
a_ : str = kwargs.pop('text' , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
a_ : str = args[0]
a_ : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
a_ : Optional[Any] = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
if audio is not None:
a_ : Tuple = self.feature_extractor(UpperCAmelCase_ , *UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , **UpperCAmelCase_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
a_ : int = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
a_ : Optional[Any] = audio_inputs['padding_mask']
return inputs
def SCREAMING_SNAKE_CASE ( self : str , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]:
a_ : Dict = kwargs.pop('audio' , UpperCAmelCase_ )
a_ : Optional[Any] = kwargs.pop('padding_mask' , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
a_ : int = args[0]
a_ : Tuple = args[1:]
if audio_values is not None:
return self._decode_audio(UpperCAmelCase_ , padding_mask=UpperCAmelCase_ )
else:
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self : Tuple , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict = None ) -> Union[str, Any]:
a_ : int = to_numpy(UpperCAmelCase_ )
a_ , a_ , a_ : Optional[Any] = audio_values.shape
if padding_mask is None:
return list(UpperCAmelCase_ )
a_ : Dict = to_numpy(UpperCAmelCase_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
a_ : Dict = seq_len - padding_mask.shape[-1]
a_ : Dict = 1 - self.feature_extractor.padding_value
a_ : Any = np.pad(UpperCAmelCase_ , ((0, 0), (0, difference)) , 'constant' , constant_values=UpperCAmelCase_ )
a_ : str = audio_values.tolist()
for i in range(UpperCAmelCase_ ):
a_ : List[Any] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
a_ : Tuple = sliced_audio.reshape(UpperCAmelCase_ , -1 )
return audio_values
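# A NumPy sketch of the trimming the decode helper above performs: extend the
# padding mask to the generated length with the *non-padding* value, then keep
# only the samples whose mask entry is not the padding value. Shapes and the
# padding value here are illustrative.
_pad = 0
_audio = np.arange(12, dtype=float).reshape(1, 1, 12)  # (batch, channels, seq_len)
_mask = np.array([[1] * 8 + [_pad] * 2])  # shorter than seq_len
_mask = np.pad(_mask, ((0, 0), (0, 12 - _mask.shape[-1])), constant_values=1 - _pad)
_trimmed = _audio[0][_mask[0][None, :] != _pad].reshape(1, -1)
print(_trimmed.shape)  # (1, 10): the two masked samples were dropped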
| 711 |
import argparse
import os
import re
UpperCAmelCase_ : List[Any] = 'src/transformers'
# Pattern that looks at the indentation in a line.
UpperCAmelCase_ : Any = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCAmelCase_ : str = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase_ : Dict = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase_ : int = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase_ : List[str] = re.compile(R'\[([^\]]+)\]')
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
a_ : List[Any] = _re_indent.search(__A )
return "" if search is None else search.groups()[0]
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : Union[str, Any]="" , __A : Dict=None , __A : Dict=None ) -> int:
"""simple docstring"""
a_ : Tuple = 0
a_ : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__A ):
index += 1
a_ : List[Any] = ['\n'.join(lines[:index] )]
else:
a_ : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
a_ : Tuple = [lines[index]]
index += 1
while index < len(__A ) and (end_prompt is None or not lines[index].startswith(__A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__A ) )
if index < len(__A ) - 1:
a_ : Dict = [lines[index + 1]]
index += 1
else:
a_ : Dict = []
else:
blocks.append('\n'.join(__A ) )
a_ : int = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__A ) > 0:
blocks.append('\n'.join(__A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def SCREAMING_SNAKE_CASE_ ( __A : Dict ) -> Any:
"""simple docstring"""
def _inner(__A : Tuple ):
return key(__A ).lower().replace('_' , '' )
return _inner
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : List[Any]=None ) -> List[str]:
"""simple docstring"""
def noop(__A : Tuple ):
return x
if key is None:
a_ : Optional[Any] = noop
# Constants are all uppercase, they go first.
a_ : List[Any] = [obj for obj in objects if key(__A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
a_ : Dict = [obj for obj in objects if key(__A )[0].isupper() and not key(__A ).isupper()]
# Functions begin with a lowercase, they go last.
a_ : Optional[int] = [obj for obj in objects if not key(__A )[0].isupper()]
a_ : Optional[Any] = ignore_underscore(__A )
return sorted(__A , key=__A ) + sorted(__A , key=__A ) + sorted(__A , key=__A )
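# A tiny illustration of the ordering rule implemented above: constants
# (ALL_CAPS) come first, then classes (Capitalized), then functions, each group
# sorted case-insensitively with underscores ignored.
def _sort_like_above(objects):
    key = lambda s: s.lower().replace("_", "")
    constants = sorted([o for o in objects if o.isupper()], key=key)
    classes = sorted([o for o in objects if o[0].isupper() and not o.isupper()], key=key)
    functions = sorted([o for o in objects if not o[0].isupper()], key=key)
    return constants + classes + functions

print(_sort_like_above(["load_model", "AutoModel", "CONFIG_NAME", "Pipeline"]))
# ['CONFIG_NAME', 'AutoModel', 'Pipeline', 'load_model']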
def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Optional[int]:
"""simple docstring"""
def _replace(__A : List[Any] ):
a_ : Tuple = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
a_ : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
a_ : str = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(__A )] ) + "]"
a_ : Optional[int] = import_statement.split('\n' )
if len(__A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
a_ : int = 2 if lines[1].strip() == '[' else 1
a_ : int = [(i, _re_strip_line.search(__A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
a_ : str = sort_objects(__A , key=lambda __A : x[1] )
a_ : int = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
a_ : str = _re_bracket_content.sub(_replace , lines[1] )
else:
a_ : List[str] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
a_ : Optional[int] = keys[:-1]
a_ : Any = get_indent(lines[1] ) + ', '.join([F"""\"{k}\"""" for k in sort_objects(__A )] )
return "\n".join(__A )
else:
# Finally we have to deal with imports fitting on one line
a_ : Union[str, Any] = _re_bracket_content.sub(_replace , __A )
return import_statement
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Any=True ) -> Union[str, Any]:
"""simple docstring"""
with open(__A , encoding='utf-8' ) as f:
a_ : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
a_ : Optional[Any] = split_code_in_indented_blocks(
__A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
a_ : Any = main_blocks[block_idx]
a_ : Any = block.split('\n' )
# Get to the start of the imports.
a_ : Any = 0
while line_idx < len(__A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
a_ : Tuple = len(__A )
else:
line_idx += 1
if line_idx >= len(__A ):
continue
# Ignore beginning and last line: they don't contain anything.
a_ : List[str] = '\n'.join(block_lines[line_idx:-1] )
a_ : List[Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
a_ : Optional[int] = split_code_in_indented_blocks(__A , indent_level=__A )
# We have two categories of import key: list or _import_structure[key].append/extend
a_ : Tuple = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
a_ : Union[str, Any] = [(pattern.search(__A ).groups()[0] if pattern.search(__A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
a_ : str = [(i, key) for i, key in enumerate(__A ) if key is not None]
a_ : Optional[int] = [x[0] for x in sorted(__A , key=lambda __A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
a_ : int = 0
a_ : int = []
for i in range(len(__A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
a_ : List[str] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__A )
count += 1
# And we put our main block back together with its first and last line.
a_ : Any = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__A ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(__A ) )
def SCREAMING_SNAKE_CASE_ ( __A : List[Any]=True ) -> List[Any]:
"""simple docstring"""
a_ : Dict = []
for root, _, files in os.walk(__A ):
if "__init__.py" in files:
a_ : Dict = sort_imports(os.path.join(__A , '__init__.py' ) , check_only=__A )
if result:
a_ : Tuple = [os.path.join(__A , '__init__.py' )]
if len(__A ) > 0:
raise ValueError(F"""Would overwrite {len(__A )} files, run `make style`.""" )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
UpperCAmelCase_ : Tuple = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 443 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None

class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])

def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 19 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 393 | 0 |
'''simple docstring'''
def one_pence() -> int:
    return 1

def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()

def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)

def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)

def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)

def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)

def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)

def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)

def solution(x: int = 200) -> int:
    return two_pound(x)
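# Sanity checks for the recursion above (the default of 200 pence reproduces
# Project Euler problem 31, whose answer is 73682 ways).
assert solution(1) == 1  # a single 1p coin
assert solution(5) == 4  # 5; 2+2+1; 2+1+1+1; 1+1+1+1+1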
if __name__ == "__main__":
print(solution(int(input().strip())))
 | 700 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
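# Quick sanity check: the first ten 5-smooth numbers, each of the form 2^i * 3^j * 5^k.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]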
if __name__ == "__main__":
n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"The list with nth numbers is: {hamming_numbers}")
print("""-----------------------------------------------------""") | 493 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"

class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
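# Quick usage of the linked stack above.
example = LinkedStack[int]()
example.push(5)
example.push(9)
print(example)        # 9->5
print(example.pop())  # 9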
if __name__ == "__main__":
from doctest import testmod
testmod()
 | 460 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
mod_file = inspect.getfile(accelerate.test_utils )
self.test_file_path = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
self.test_metrics = test_metrics
@require_cpu
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
self.test_metrics.main()
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
print(F"Found {torch.cuda.device_count()} devices." )
cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd , env=os.environ.copy() )
| 312 | 0 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
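# A quick demonstration: every returned line is exactly max_width characters,
# with leftover spaces handed out round-robin from the left.
assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]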
if __name__ == "__main__":
from doctest import testmod
testmod()
| 550 |
ROMAN = [
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total

def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 550 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Build train/eval dataloaders over (subsets of) the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.", )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
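# Launch sketch (standard `accelerate` CLI; the script filename is an
# assumption — use whatever this file is saved as. DeepSpeed settings come
# from a prior `accelerate config` run):
#
#     accelerate launch peak_memory_tracking.py --num_epochs 1 --n_train 320 --n_val 160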
| 693 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Construct a DPR context-encoder tokenizer; identical to BertTokenizer with DPR defaults."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Construct a DPR question-encoder tokenizer; identical to BertTokenizer with DPR defaults."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.""" )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4):
        """Return the best answer spans across passages, ordered by descending score."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int):
        """Score every candidate (start, end) pair and keep the top non-overlapping intervals."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
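# Usage sketch (standard DPR reader flow; `DPRReader` is the matching model
# class in the same library and is assumed importable; strings are illustrative):
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(questions="What is love?", titles="Haddaway",
#                         texts="'What Is Love' is a song...", return_tensors="pt")
#     # outputs = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")(**encoded)
#     # best_spans = tokenizer.decode_best_spans(encoded, outputs)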
| 35 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder."""

    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former."""

    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying together the vision, Q-Former and text sub-configs."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Instantiate an InstructBlipConfig from its three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
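# Usage sketch (both lines build roughly equivalent configs, since the
# sub-config defaults above match the no-argument constructors):
#
#     config = InstructBlipConfig()  # vision/qformer/text sub-configs take defaults
#     config = InstructBlipConfig.from_vision_qformer_text_configs(
#         InstructBlipVisionConfig(), InstructBlipQFormerConfig(), CONFIG_MAPPING["opt"]()
#     )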
| 701 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
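# CLI sketch (flags are exactly the required arguments registered above; the
# script filename is an assumption):
#
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/ckpt \
#         --rembert_config_file /path/to/rembert_config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin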
| 186 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video source via downloadgram and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 404 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 404 | 1 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a map either to a multiprocessing Pool or to the configured joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}")

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}")
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that routes parallel maps through the named joblib backend."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
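# Usage sketch (experimental API as defined above; the "spark" backend needs
# the optional joblibspark package, and `load_dataset` here is illustrative):
#
#     with parallel_backend("spark"):
#         ds = load_dataset("imdb", num_proc=2)  # map-style work goes through joblib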
| 537 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a single video, or a batch of videos into a list of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    """Video image processor: resize, center-crop, rescale (with optional offset) and normalize frames."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # cast to float so the optional offset below keeps fractional precision
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
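# Usage sketch (a video is a list of PIL frames; with the defaults above,
# frames are resized, center-cropped to 224x224, rescaled and normalized;
# `frame1`/`frame2` are placeholders for real PIL.Image objects):
#
#     processor = VivitImageProcessor()
#     inputs = processor([frame1, frame2], return_tensors="pt")
#     inputs["pixel_values"].shape  # (1, num_frames, 3, 224, 224)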
| 537 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
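# Usage note (standard transformers lazy-import pattern): attribute access on
# the package makes _LazyModule import the heavy modeling file on demand, e.g.
# (checkpoint name taken from the model docs — verify it before relying on it):
#
#     from transformers import RobertaPreLayerNormModel
#     model = RobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40")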
| 639 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine, via collections.Counter, whether some permutation of input_str is a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine, via a hand-rolled frequency dict, whether some permutation of input_str is a palindrome."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # A string can be rearranged into a palindrome iff at most one character
    # occurs an odd number of times.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations against each other."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
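# Worked example: "carrace" has counts c:2, a:2, r:2, e:1 — a single odd count,
# so a palindrome arrangement ("racecar") exists; "carb" has four odd counts:
#
# >>> can_string_be_rearranged_as_palindrome_counter("carrace")
# True
# >>> can_string_be_rearranged_as_palindrome("carb")
# False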
| 639 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 704 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law F = k * |q1 * q2| / d**2 for whichever argument is 0."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
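# Worked example: two 1 C charges 1 m apart, F = k * |q1 * q2| / d**2:
#
# >>> coulombs_law(force=0, charge1=1, charge2=1, distance=1)
# {'force': 8988000000.0}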
| 136 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase_ ( _snake_case : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
'''simple docstring'''
__magic_name__ : Any = []
if isinstance(_snake_case , _snake_case ):
for v in tree.values():
shapes.extend(_fetch_dims(_snake_case ) )
elif isinstance(_snake_case , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_snake_case ) )
elif isinstance(_snake_case , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
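# Illustrative check (the `_fetch_dims` name follows the recursive call sites
# above; the demo helper itself is not part of the original module):
def _demo_fetch_dims() -> None:
    tree = {"a": torch.zeros(2, 3), "b": [torch.zeros(4)]}
    # shapes are collected depth-first from every tensor in the nested container
    assert _fetch_dims(tree) == [torch.Size([2, 3]), torch.Size([4])]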
@torch.jit.ignore
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Tuple[int, ...] ) -> Tuple[int, ...]:
'''simple docstring'''
__magic_name__ : List[Any] = []
for d in reversed(_snake_case ):
idx.append(flat_idx % d )
__magic_name__ : List[str] = flat_idx // d
return tuple(reversed(_snake_case ) )
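# Illustrative check (the `_flat_idx_to_idx` name follows its call site further
# down; the demo helper is not part of the original module): a flat index is
# unravelled most-significant-dimension first.
def _demo_flat_idx_to_idx() -> None:
    # over batch dims (2, 3), flat index 5 is the last entry, i.e. (1, 2)
    assert _flat_idx_to_idx(5, (2, 3)) == (1, 2)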
@torch.jit.ignore
def lowerCAmelCase_ ( _snake_case : Sequence[int] , _snake_case : Sequence[int] , _snake_case : Sequence[int] , _snake_case : Optional[Sequence[bool]] = None , _snake_case : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(_snake_case : List[bool] ) -> None:
__magic_name__ : Optional[Any] = True
for i in range(len(_snake_case ) ):
__magic_name__ : Dict = -1 * (i + 1)
l[reversed_idx] &= tally
__magic_name__ : Optional[int] = l[reversed_idx]
if start_edges is None:
__magic_name__ : str = [s == 0 for s in start]
reduce_edge_list(_snake_case )
if end_edges is None:
__magic_name__ : Optional[int] = [e == (d - 1) for e, d in zip(_snake_case , _snake_case )]
reduce_edge_list(_snake_case )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_snake_case ) == 0:
return [()]
elif len(_snake_case ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
__magic_name__ : List[Tuple[slice, ...]] = []
__magic_name__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_snake_case , _snake_case ):
if s == e:
path_list.append(slice(_snake_case , s + 1 ) )
else:
break
__magic_name__ : Tuple[slice, ...] = tuple(_snake_case )
__magic_name__ : Dict = len(_snake_case )
# start == end, and we're done
if divergence_idx == len(_snake_case ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__magic_name__ : Dict = start[divergence_idx]
return tuple(
path + (slice(_snake_case , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__magic_name__ : int = end[divergence_idx]
return tuple(
path + (slice(_snake_case , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__magic_name__ : Union[str, Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
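# Worked example (illustrative): over dims (3, 4), covering the inclusive index
# range (0, 2) .. (2, 1) yields three slice tuples -- row 0, columns 2:4 (the
# ragged bottom edge), row 1 in full (the contiguous middle), and row 2,
# columns 0:2 (the ragged top edge).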
@torch.jit.ignore
def lowerCAmelCase_ ( _snake_case : torch.Tensor , _snake_case : int , _snake_case : int , _snake_case : int ) -> torch.Tensor:
'''simple docstring'''
__magic_name__ : Any = t.shape[:no_batch_dims]
__magic_name__ : List[str] = list(_flat_idx_to_idx(_snake_case , _snake_case ) )
# _get_minimal_slice_set is inclusive
__magic_name__ : List[str] = list(_flat_idx_to_idx(flat_end - 1 , _snake_case ) )
# Get an ordered list of slices to perform
__magic_name__ : Optional[int] = _get_minimal_slice_set(
_snake_case , _snake_case , _snake_case , )
__magic_name__ : str = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
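# Sanity sketch (illustrative; the `_chunk_slice` name and keyword arguments
# follow the partial(...) call further down, and the demo helper is not part
# of the original module):
def _demo_chunk_slice() -> None:
    t = torch.arange(24).view(2, 3, 4)
    flat = t.reshape(-1, 4)
    # slicing flat rows 1:5 through _chunk_slice should match direct slicing
    got = _chunk_slice(t, flat_start=1, flat_end=5, no_batch_dims=2)
    assert torch.equal(got, flat[1:5])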
def lowerCAmelCase_ ( _snake_case : Callable , _snake_case : Dict[str, Any] , _snake_case : int , _snake_case : int , _snake_case : bool = False , _snake_case : Any = None , _snake_case : bool = False , ) -> Any:
'''simple docstring'''
if not (len(_snake_case ) > 0):
raise ValueError("Must provide at least one input" )
__magic_name__ : Optional[int] = [shape[:no_batch_dims] for shape in _fetch_dims(_snake_case )]
__magic_name__ : Dict = tuple([max(_snake_case ) for s in zip(*_snake_case )] )
    def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__magic_name__ : Tuple = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__magic_name__ : Tuple = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
__magic_name__ : Tuple = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__magic_name__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , _snake_case )
__magic_name__ : Tuple = None
if _out is not None:
        __magic_name__ : str = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
__magic_name__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__magic_name__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
__magic_name__ : Tuple = 0
__magic_name__ : Dict = prepped_outputs
for _ in range(_snake_case ):
# Chunk the input
if not low_mem:
__magic_name__ : Optional[int] = _select_chunk
else:
__magic_name__ : Optional[Any] = partial(
_chunk_slice , flat_start=_snake_case , flat_end=min(_snake_case , i + chunk_size ) , no_batch_dims=len(_snake_case ) , )
__magic_name__ : Dict[str, Any] = tensor_tree_map(_snake_case , _snake_case )
# Run the layer on the chunk
__magic_name__ : Optional[Any] = layer(**_snake_case )
# Allocate space for the output
if out is None:
            __magic_name__ : Optional[int] = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _snake_case )
# Put the chunk in its pre-allocated space
if isinstance(_snake_case , _snake_case ):
def assign(_snake_case : dict , _snake_case : dict ) -> None:
for k, v in da.items():
if isinstance(_snake_case , _snake_case ):
assign(_snake_case , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__magic_name__ : List[Any] = da[k]
assign(_snake_case , _snake_case )
elif isinstance(_snake_case , _snake_case ):
for xa, xa in zip(_snake_case , _snake_case ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__magic_name__ : List[Any] = xa
elif isinstance(_snake_case , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__magic_name__ : int = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
    __magic_name__ : str = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , _snake_case )
return out
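# Usage sketch (illustrative; `chunk_layer` is assumed to be the original,
# de-obfuscated name of the function above, with the positional order
# layer, inputs, chunk_size, no_batch_dims suggested by its body):
def _demo_chunk_layer() -> None:
    linear = torch.nn.Linear(8, 8)
    x = torch.randn(4, 6, 8)
    full = linear(x)
    # applying the layer in chunks of 5 flattened batch rows should match
    # applying it to the whole (4, 6) batch at once
    chunked = chunk_layer(lambda x: linear(x), {"x": x}, 5, 2)
    assert torch.allclose(full, chunked, atol=1e-6)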
class _snake_case :
def __init__( self , _a = 512 , ):
__magic_name__ : Optional[Any] = max_chunk_size
__magic_name__ : Optional[int] = None
__magic_name__ : Optional[tuple] = None
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__magic_name__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
__magic_name__ : List[str] = [c for c in candidates if c > min_chunk_size]
__magic_name__ : Dict = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_a ) -> bool:
try:
with torch.no_grad():
fn(*_a , chunk_size=_a )
return True
except RuntimeError:
return False
__magic_name__ : int = 0
__magic_name__ : Optional[int] = len(_a ) - 1
while i > min_viable_chunk_size_index:
__magic_name__ : Tuple = test_chunk_size(candidates[i] )
if not viable:
__magic_name__ : Dict = (min_viable_chunk_size_index + i) // 2
else:
__magic_name__ : Any = i
__magic_name__ : Dict = (i + len(_a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : str = True
for aa, aa in zip(_a , _a ):
assert type(_a ) == type(_a )
if isinstance(_a , (list, tuple) ):
consistent &= self._compare_arg_caches(_a , _a )
elif isinstance(_a , _a ):
                __magic_name__ : int = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
                __magic_name__ : Tuple = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(_a , _a )
else:
consistent &= aa == aa
return consistent
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , ):
__magic_name__ : List[str] = True
        __magic_name__ : tuple = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , _a , _a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_a )
__magic_name__ : Optional[int] = self._compare_arg_caches(self.cached_arg_data , _a )
else:
# Otherwise, we can reuse the precomputed value
__magic_name__ : List[Any] = False
if not consistent:
__magic_name__ : Any = self._determine_favorable_chunk_size(
_a , _a , _a , )
__magic_name__ : Dict = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
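# Usage sketch (illustrative; upstream this helper is known as ChunkSizeTuner
# with a tune_chunk_size(fn, args, min_chunk_size) entry point -- an
# assumption here, since the dump renames the class and its methods):
#     tuner = ChunkSizeTuner(max_chunk_size=512)
#     chunk_size = tuner.tune_chunk_size(fn, args, min_chunk_size=16)
# The tuner tries power-of-two candidates and binary-searches for the largest
# chunk size whose trial run does not raise (e.g. a CUDA out-of-memory error).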
| 124 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's console logging
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 124 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
__lowerCamelCase : Dict = CLIPTokenizer
__lowerCamelCase : Optional[Any] = CLIPTokenizerFast
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Optional[int] = {}
__lowerCamelCase : List[Any] = False
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
a__ : Tuple = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : str = dict(zip(a_ , range(len(a_ ) ) ) )
a__ : Optional[int] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Union[str, Any] = {"unk_token": "<unk>"}
a__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def UpperCAmelCase ( self : Optional[Any] , **a_ : Tuple ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Tuple , **a_ : Any ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Tuple , a_ : Dict ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = "lower newer"
a__ : Dict = "lower newer"
return input_text, output_text
def UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : Optional[Any] = "lower newer"
a__ : Tuple = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Tuple = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
a__ : List[str] = tokens + [tokenizer.unk_token]
a__ : str = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
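    # Walk-through of the expected pieces above (illustrative): "lower" starts
    # as the characters "l o w e r</w>"; the merges "l o" -> "lo" and
    # "e r</w>" -> "er</w>" fire, leaving ["lo", "w", "er</w>"]. "newer" only
    # matches the "e r</w>" merge, leaving ["n", "e", "w", "er</w>"].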
@require_ftfy
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
a__ : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
a__ : Optional[int] = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : str = tokenizer_s.tokenize(a_ )
a__ : int = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Dict = "xa\u0303y" + " " + "x\xe3y"
a__ : Any = tokenizer_s.tokenize(a_ )
a__ : Optional[int] = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on unicode of space type
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : str = tokenizer_s.tokenize(a_ )
a__ : List[Any] = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Test that the tokenization is identical on unicode of line break type
a__ : int = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : Any = tokenizer_s.tokenize(a_ )
a__ : Dict = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_ , a_ )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
a__ : Union[str, Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Union[str, Any] = F"{text_of_1_token} {text_of_1_token}"
a__ : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , )
a__ : List[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
a__ : List[Any] = F" {text}"
a__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , )
a__ : Tuple = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
        pass
| 251 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any , a_ : int , a_ : Any=13 , a_ : int=30 , a_ : int=2 , a_ : str=3 , a_ : List[Any]=True , a_ : Union[str, Any]=True , a_ : Any=32 , a_ : Union[str, Any]=2 , a_ : Union[str, Any]=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[Any]=0.1 , a_ : str=0.1 , a_ : str=10 , a_ : str=0.02 , a_ : Dict=3 , a_ : Optional[Any]=None , a_ : str=2 , ) -> List[Any]:
'''simple docstring'''
a__ : Optional[Any] = parent
a__ : Optional[int] = batch_size
a__ : int = image_size
a__ : Optional[Any] = patch_size
a__ : List[Any] = num_channels
a__ : List[str] = is_training
a__ : List[Any] = use_labels
a__ : List[Any] = hidden_size
a__ : Tuple = num_hidden_layers
a__ : Dict = num_attention_heads
a__ : str = intermediate_size
a__ : Union[str, Any] = hidden_act
a__ : List[str] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Optional[int] = type_sequence_label_size
a__ : Optional[Any] = initializer_range
a__ : Any = scope
a__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
a__ : Any = (image_size // patch_size) ** 2
a__ : List[str] = num_patches + 2
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : List[str] = None
if self.use_labels:
a__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase ( self : Any , a_ : Dict , a_ : Optional[Any] , a_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ : int = TFDeiTModel(config=a_ )
a__ : List[str] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Any , a_ : int , a_ : Tuple , a_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ : Dict = TFDeiTForMaskedImageModeling(config=a_ )
a__ : str = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a__ : List[Any] = 1
a__ : int = TFDeiTForMaskedImageModeling(a_ )
a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : Any = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : List[str] , a_ : List[str] , a_ : str , a_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.type_sequence_label_size
a__ : Union[str, Any] = TFDeiTForImageClassification(a_ )
a__ : List[str] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ : Union[str, Any] = 1
a__ : Any = TFDeiTForImageClassification(a_ )
a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : int = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : str ) -> str:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
a__ , a__ , a__ : Optional[Any] = config_and_inputs
a__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
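# Note (illustrative): seq_length = num_patches + 2 because DeiT prepends both
# a [CLS] token and a distillation token; with the defaults above
# (image_size=30, patch_size=2) that is (30 // 2) ** 2 = 225 patches and a
# sequence length of 227.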
@require_tf
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowerCamelCase : Dict = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowerCamelCase : Any = False
__lowerCamelCase : Dict = False
__lowerCamelCase : int = False
__lowerCamelCase : Optional[Any] = False
def UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
a__ : int = TFDeiTModelTester(self )
a__ : str = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[Any] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
a__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , tf.keras.layers.Dense ) )
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(a_ )
a__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Dict = [*signature.parameters.keys()]
a__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCAmelCase ( self : List[Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any]=False ) -> Dict:
'''simple docstring'''
a__ : int = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = TFDeiTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def lowercase__ ( ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
a__ : Optional[Any] = self.default_image_processor
a__ : Optional[Any] = prepare_img()
a__ : Tuple = image_processor(images=a_ , return_tensors="tf" )
# forward pass
a__ : int = model(**a_ )
# verify the logits
a__ : int = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
a__ : Union[str, Any] = tf.constant([-1.0266, 0.1912, -1.2861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
| 251 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_UpperCamelCase : List[str] =re.compile(R'\b(a|an|the)\b', re.UNICODE)
_UpperCamelCase : Optional[int] =None
def a__ () -> Optional[int]:
_A : Union[str, Any] = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=snake_case__ , default=1.0 , help='''Predict \"\" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=snake_case__ , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ (__lowercase :Dict ) -> Tuple:
_A : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_A : str = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def a__ (__lowercase :Any ) -> Tuple:
def remove_articles(__lowercase :Optional[Any] ):
return ARTICLES_REGEX.sub(''' ''' , snake_case__ )
def white_space_fix(__lowercase :Any ):
return " ".join(text.split() )
def remove_punc(__lowercase :Union[str, Any] ):
_A : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowercase :Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__ ) ) ) )
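# Illustrative check (the `normalize_answer` name follows its call sites below;
# the demo helper is not part of the original script): normalization
# lowercases, strips punctuation and articles, and collapses whitespace.
def _demo_normalize_answer() -> None:
    assert normalize_answer("The  Eiffel Tower!") == "eiffel tower"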
def a__ (__lowercase :Any ) -> Optional[int]:
if not s:
return []
return normalize_answer(snake_case__ ).split()
def a__ (__lowercase :Dict , __lowercase :List[str] ) -> int:
return int(normalize_answer(snake_case__ ) == normalize_answer(snake_case__ ) )
def a__ (__lowercase :Optional[int] , __lowercase :List[Any] ) -> List[Any]:
_A : Tuple = get_tokens(snake_case__ )
_A : List[str] = get_tokens(snake_case__ )
_A : Any = collections.Counter(snake_case__ ) & collections.Counter(snake_case__ )
_A : Optional[int] = sum(common.values() )
if len(snake_case__ ) == 0 or len(snake_case__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_A : List[Any] = 1.0 * num_same / len(snake_case__ )
_A : Dict = 1.0 * num_same / len(snake_case__ )
_A : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
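# Worked example (illustrative; `compute_fa` per its call site below, and the
# demo helper is not part of the original script): gold "the cat sat" and
# prediction "cat sat down" share the tokens {"cat", "sat"} after
# normalization, so precision = 2/3, recall = 2/2, and
# F1 = 2 * (2/3) * 1 / (2/3 + 1) = 0.8.
def _demo_compute_fa() -> None:
    assert abs(compute_fa("the cat sat", "cat sat down") - 0.8) < 1e-9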
def a__ (__lowercase :List[str] , __lowercase :Tuple ) -> List[Any]:
_A : List[str] = {}
_A : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_A : Tuple = qa["id"]
_A : Optional[Any] = [t for t in qa["answers"]["text"] if normalize_answer(snake_case__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_A : List[str] = [""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
_A : List[str] = preds[qid]
# Take max over all gold answers
_A : Optional[Any] = max(compute_exact(snake_case__ , snake_case__ ) for a in gold_answers )
_A : Tuple = max(compute_fa(snake_case__ , snake_case__ ) for a in gold_answers )
return exact_scores, fa_scores
def a__ (__lowercase :List[str] , __lowercase :Tuple , __lowercase :Dict , __lowercase :str ) -> int:
_A : List[str] = {}
for qid, s in scores.items():
_A : Optional[int] = na_probs[qid] > na_prob_thresh
if pred_na:
_A : int = float(not qid_to_has_ans[qid] )
else:
_A : str = s
return new_scores
def a__ (__lowercase :Tuple , __lowercase :Dict , __lowercase :Optional[Any]=None ) -> List[str]:
if not qid_list:
_A : Dict = len(snake_case__ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
_A : Dict = len(snake_case__ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def a__ (__lowercase :Tuple , __lowercase :List[str] , __lowercase :Optional[int] ) -> Optional[Any]:
for k in new_eval:
_A : List[str] = new_eval[k]
def a__ (__lowercase :int , __lowercase :List[str] , __lowercase :List[str] , __lowercase :Optional[Any] ) -> Tuple:
plt.step(snake_case__ , snake_case__ , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(snake_case__ , snake_case__ , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(snake_case__ )
plt.savefig(snake_case__ )
plt.clf()
def a__ (__lowercase :Dict , __lowercase :Dict , __lowercase :int , __lowercase :Union[str, Any] , __lowercase :List[Any]=None , __lowercase :Optional[int]=None ) -> Optional[Any]:
_A : Union[str, Any] = sorted(snake_case__ , key=lambda __lowercase : na_probs[k] )
_A : Dict = 0.0
_A : str = 1.0
_A : int = 0.0
_A : Dict = [1.0]
_A : Any = [0.0]
_A : List[Any] = 0.0
for i, qid in enumerate(snake_case__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_A : Optional[int] = true_pos / float(i + 1 )
_A : List[str] = true_pos / float(snake_case__ )
if i == len(snake_case__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__ )
recalls.append(snake_case__ )
if out_image:
plot_pr_curve(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return {"ap": 100.0 * avg_prec}
def a__ (__lowercase :Dict , __lowercase :Optional[Any] , __lowercase :Union[str, Any] , __lowercase :Tuple , __lowercase :List[str] , __lowercase :str ) -> int:
if out_image_dir and not os.path.exists(snake_case__ ):
os.makedirs(snake_case__ )
_A : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_A : Any = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
_A : Optional[int] = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
_A : Tuple = {k: float(snake_case__ ) for k, v in qid_to_has_ans.items()}
_A : List[str] = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(snake_case__ , snake_case__ , '''pr_exact''' )
merge_eval(snake_case__ , snake_case__ , '''pr_f1''' )
merge_eval(snake_case__ , snake_case__ , '''pr_oracle''' )
def a__ (__lowercase :Optional[int] , __lowercase :Any , __lowercase :Optional[Any] , __lowercase :Optional[Any] ) -> int:
if not qid_list:
return
_A : Dict = [na_probs[k] for k in qid_list]
_A : List[Any] = np.ones_like(snake_case__ ) / float(len(snake_case__ ) )
plt.hist(snake_case__ , weights=snake_case__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(snake_case__ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def a__ (__lowercase :Optional[int] , __lowercase :Optional[Any] , __lowercase :Optional[Any] , __lowercase :List[str] ) -> Any:
_A : Optional[Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_A : Union[str, Any] = num_no_ans
_A : str = cur_score
_A : str = 0.0
_A : List[str] = sorted(snake_case__ , key=lambda __lowercase : na_probs[k] )
for i, qid in enumerate(snake_case__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_A : Optional[int] = scores[qid]
else:
if preds[qid]:
_A : str = -1
else:
_A : int = 0
cur_score += diff
if cur_score > best_score:
_A : Dict = cur_score
_A : Union[str, Any] = na_probs[qid]
return 100.0 * best_score / len(snake_case__ ), best_thresh
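# Intuition (illustrative note): questions are sorted by predicted no-answer
# probability, and a threshold is swept from most to least confident; each
# answerable question contributes its score, and each unanswerable one
# contributes -1 or 0 depending on whether the model emitted text, so the
# running maximum identifies the best no-answer threshold.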
def a__ (__lowercase :int , __lowercase :Union[str, Any] , __lowercase :List[str] , __lowercase :Tuple , __lowercase :Any , __lowercase :Union[str, Any] ) -> List[str]:
_A : int = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
_A : Union[str, Any] = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
_A : Union[str, Any] = best_exact
_A : List[str] = exact_thresh
_A : str = best_fa
_A : Optional[int] = fa_thresh
def a__ () -> int:
with open(OPTS.data_file ) as f:
_A : Tuple = json.load(snake_case__ )
_A : str = dataset_json["data"]
with open(OPTS.pred_file ) as f:
_A : Optional[Any] = json.load(snake_case__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_A : str = json.load(snake_case__ )
else:
_A : List[Any] = {k: 0.0 for k in preds}
_A : int = make_qid_to_has_ans(snake_case__ ) # maps qid to True/False
_A : List[str] = [k for k, v in qid_to_has_ans.items() if v]
_A : Tuple = [k for k, v in qid_to_has_ans.items() if not v]
_A : Tuple = get_raw_scores(snake_case__ , snake_case__ )
_A : Union[str, Any] = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh )
_A : Optional[int] = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh )
_A : List[Any] = make_eval_dict(snake_case__ , snake_case__ )
if has_ans_qids:
_A : List[str] = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__ )
merge_eval(snake_case__ , snake_case__ , '''HasAns''' )
if no_ans_qids:
_A : str = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__ )
merge_eval(snake_case__ , snake_case__ , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , OPTS.out_image_dir )
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(snake_case__ , snake_case__ )
else:
print(json.dumps(snake_case__ , indent=2 ) )
if __name__ == "__main__":
_UpperCamelCase : List[str] =parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 206 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''spiece.model'''}
_snake_case = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class _snake_case ( _lowercase ):
def __init__( self: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: Tuple=True , __lowerCamelCase: Union[str, Any]=False , __lowerCamelCase: str="<s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<unk>" , __lowerCamelCase: str="<sep>" , __lowerCamelCase: Optional[int]="<pad>" , __lowerCamelCase: List[Any]="<cls>" , __lowerCamelCase: List[Any]="<mask>" , __lowerCamelCase: int=["<eop>", "<eod>"] , __lowerCamelCase: Optional[Dict[str, Any]] = None , **__lowerCamelCase: Any , ) -> None:
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
__UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : str = do_lower_case
__UpperCAmelCase : int = remove_space
__UpperCAmelCase : str = keep_accents
__UpperCAmelCase : List[str] = vocab_file
__UpperCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
__UpperCAmelCase : int = jieba
__UpperCAmelCase : Optional[int] = str.maketrans(" \n" , "\u2582\u2583" )
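    # Why the translation table above exists (illustrative note): CPM first
    # segments Chinese text with jieba, then maps " " -> "\u2582" and
    # "\n" -> "\u2583" so SentencePiece sees them as ordinary symbols; the
    # `_decode` override at the bottom of this class applies the inverse
    # mapping.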
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCamelCase ( self: List[str] ) -> List[Any]:
return len(self.sp_model )
def _lowerCamelCase ( self: Tuple ) -> int:
__UpperCAmelCase : Tuple = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[Any] ) -> int:
__UpperCAmelCase : Dict = self.__dict__.copy()
__UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self: List[Any] , __lowerCamelCase: int ) -> Dict:
__UpperCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> List[str]:
if self.remove_space:
__UpperCAmelCase : List[str] = " ".join(inputs.strip().split() )
else:
__UpperCAmelCase : Tuple = inputs
__UpperCAmelCase : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__UpperCAmelCase : Optional[Any] = unicodedata.normalize("NFKD" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
__UpperCAmelCase : Optional[int] = outputs.lower()
return outputs
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: str ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = self.preprocess_text(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
__UpperCAmelCase : int = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__UpperCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCAmelCase : Optional[int] = cur_pieces[1:]
else:
__UpperCAmelCase : Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Optional[int] ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def _lowerCamelCase ( self: str , __lowerCamelCase: int ) -> Optional[Any]:
return self.sp_model.IdToPiece(__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any ) -> Optional[int]:
__UpperCAmelCase : List[Any] = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip()
return out_string
def _lowerCamelCase ( self: Any , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : Any = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1, 1]
return ([0] * len(__lowerCamelCase )) + [1, 1]
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : str = [self.sep_token_id]
__UpperCAmelCase : str = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Optional[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
__UpperCAmelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
def _lowerCamelCase ( self: Any , *__lowerCamelCase: List[Any] , **__lowerCamelCase: Optional[Any] ) -> Any:
__UpperCAmelCase : Dict = super()._decode(*__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Tuple = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 382 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Tuple = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
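# Note (illustrative): with this pattern, importing the package stays cheap --
# the heavy torch-dependent submodules are only imported on first attribute
# access via _LazyModule, and the try/except blocks above let the package
# degrade gracefully when sentencepiece, tokenizers, or torch are missing.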
| 715 |
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9 # units = N * m^2 * C^-2
def __snake_case ( force , chargea , chargeb , distance ) -> dict[str, float]:
    """simple docstring"""
    charge_product = abs(chargea * chargeb )
    if (force, chargea, chargeb, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        chargea = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        chargeb = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self: List[str] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Dict=13 , _lowerCAmelCase: Optional[Any]=32 , _lowerCAmelCase: Optional[int]=3 , _lowerCAmelCase: List[str]=4 , _lowerCAmelCase: Any=[10, 20, 30, 40] , _lowerCAmelCase: Tuple=[2, 2, 3, 2] , _lowerCAmelCase: Tuple=True , _lowerCAmelCase: Any=True , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: Optional[Any]=10 , _lowerCAmelCase: Optional[Any]=0.02 , _lowerCAmelCase: List[str]=["stage2", "stage3", "stage4"] , _lowerCAmelCase: List[Any]=[2, 3, 4] , _lowerCAmelCase: Any=None , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =num_stages
UpperCAmelCase_ =hidden_sizes
UpperCAmelCase_ =depths
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =num_labels
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =out_features
UpperCAmelCase_ =out_indices
UpperCAmelCase_ =scope
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =ConvNextForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase_ =None
UpperCAmelCase_ =ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_snake_case =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_snake_case =True
_snake_case =False
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: int ):
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ =self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ =ConvNextModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Tuple ) -> List[Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
        self.model_tester = ConvNextModelTester(self)
| 54 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 442 | 0 |
'''simple docstring'''
import math
def prime_sieve(n):
    """Return all primes strictly below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit=999_966_663_333):
    """Return the sum of all semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
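# Added usage sketch (not part of the original file):
def _prime_sieve_demo() -> None:
    assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]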
if __name__ == "__main__":
    print(solution())
| 706 | import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = checkpoint["args"]
    state_dict = checkpoint["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]

    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
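# Added illustration (not part of the original script): make_linear_from_emb returns a
# bias-free projection head that shares its weight storage with the token embedding.
def _tied_head_demo() -> None:
    emb = nn.Embedding(11, 4)
    head = make_linear_from_emb(emb)
    assert head.weight.shape == (11, 4)
    assert head.weight.data_ptr() == emb.weight.data_ptr()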
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 587 | 0 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
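# Added usage sketch (not part of the original file): successive remove() calls yield
# nodes in ascending val order, so the heap doubles as a heapsort.
def _heap_sort_demo() -> None:
    heap = MinHeap([Node("n1", 5), Node("n2", 1), Node("n3", 3)])
    assert [heap.remove().val for _ in range(3)] == [1, 3, 5]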
| 66 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __a ( unittest.TestCase ):
"""simple docstring"""
_A : Union[str, Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_A : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self : Tuple ,_UpperCamelCase : int ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =AudioClassificationPipeline(model=_UpperCamelCase ,feature_extractor=_UpperCamelCase )
# test with a raw waveform
SCREAMING_SNAKE_CASE__ =np.zeros((3_4_0_0_0,) )
SCREAMING_SNAKE_CASE__ =np.zeros((1_4_0_0_0,) )
return audio_classifier, [audioa, audio]
def __A ( self : Tuple ,_UpperCamelCase : Dict ,_UpperCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =examples
SCREAMING_SNAKE_CASE__ =audio_classifier(_UpperCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_UpperCamelCase ,[
{"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )},
{"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )},
] ,)
SCREAMING_SNAKE_CASE__ =audio_classifier(_UpperCamelCase ,top_k=1 )
self.assertEqual(
_UpperCamelCase ,[
{"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )},
] ,)
self.run_torchaudio(_UpperCamelCase )
@require_torchaudio
def __A ( self : int ,_UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
import datasets
# test with a local file
SCREAMING_SNAKE_CASE__ =datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
SCREAMING_SNAKE_CASE__ =dataset[0]["""audio"""]["""array"""]
SCREAMING_SNAKE_CASE__ =audio_classifier(_UpperCamelCase )
self.assertEqual(
_UpperCamelCase ,[
{"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )},
{"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )},
] ,)
@require_torch
def __A ( self : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""anton-l/wav2vec2-random-tiny-classifier"""
SCREAMING_SNAKE_CASE__ =pipeline("""audio-classification""" ,model=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =np.ones((8_0_0_0,) )
SCREAMING_SNAKE_CASE__ =audio_classifier(_UpperCamelCase ,top_k=4 )
SCREAMING_SNAKE_CASE__ =[
{"""score""": 0.0842, """label""": """no"""},
{"""score""": 0.0838, """label""": """up"""},
{"""score""": 0.0837, """label""": """go"""},
{"""score""": 0.0834, """label""": """right"""},
]
SCREAMING_SNAKE_CASE__ =[
{"""score""": 0.0845, """label""": """stop"""},
{"""score""": 0.0844, """label""": """on"""},
{"""score""": 0.0841, """label""": """right"""},
{"""score""": 0.0834, """label""": """left"""},
]
self.assertIn(nested_simplify(_UpperCamelCase ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
SCREAMING_SNAKE_CASE__ ={"""array""": np.ones((8_0_0_0,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE__ =audio_classifier(_UpperCamelCase ,top_k=4 )
self.assertIn(nested_simplify(_UpperCamelCase ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self : Union[str, Any] ) -> str:
'''simple docstring'''
import datasets
SCREAMING_SNAKE_CASE__ ="""superb/wav2vec2-base-superb-ks"""
SCREAMING_SNAKE_CASE__ =pipeline("""audio-classification""" ,model=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =datasets.load_dataset("""anton-l/superb_dummy""" ,"""ks""" ,split="""test""" )
SCREAMING_SNAKE_CASE__ =np.array(dataset[3]["""speech"""] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE__ =audio_classifier(_UpperCamelCase ,top_k=4 )
self.assertEqual(
nested_simplify(_UpperCamelCase ,decimals=3 ) ,[
{"""score""": 0.981, """label""": """go"""},
{"""score""": 0.007, """label""": """up"""},
{"""score""": 0.006, """label""": """_unknown_"""},
{"""score""": 0.001, """label""": """down"""},
] ,)
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self : int ) -> Optional[Any]:
'''simple docstring'''
pass
| 151 | 0 |
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
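# Added counterpart sketch (not part of the original file): a minimal server for the
# client above. The filename "File_to_send" is a placeholder assumption.
def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()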
if __name__ == "__main__":
    main() | 234 | import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = LxmertTokenizer
lowerCAmelCase_ = LxmertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def _snake_case ( self : List[str] ) -> Any:
super().setUp()
_lowerCamelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _snake_case ( self : Any , snake_case__ : Dict ) -> str:
_lowerCamelCase = 'UNwant\u00E9d,running'
_lowerCamelCase = 'unwanted, running'
return input_text, output_text
def _snake_case ( self : Optional[int] ) -> List[Any]:
_lowerCamelCase = self.tokenizer_class(self.vocab_file )
_lowerCamelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(snake_case__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 1_0, 8, 9] )
def _snake_case ( self : Any ) -> List[str]:
if not self.test_rust_tokenizer:
return
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = 'I was born in 92000, and this is falsé.'
_lowerCamelCase = tokenizer.tokenize(snake_case__ )
_lowerCamelCase = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
_lowerCamelCase = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
_lowerCamelCase = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = tokenizer.encode(snake_case__ )
_lowerCamelCase = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ ) | 234 | 1 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """Bead sort only works for sequences of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
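# Added check (not part of the original file): the pairwise transfer above moves the
# full difference between neighbouring rods, so each pass behaves like one bubble-sort
# sweep and len(sequence) passes always suffice.
def _bead_sort_demo() -> None:
    assert bead_sort([8, 2, 1, 5]) == [1, 2, 5, 8]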
if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 432 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( lowercase , unittest.TestCase ):
__lowercase : Optional[Any] = None
__lowercase : Tuple = BloomTokenizerFast
__lowercase : Tuple = BloomTokenizerFast
__lowercase : int = True
__lowercase : str = False
__lowercase : int = "tokenizer_file"
__lowercase : List[str] = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def lowercase ( self ) -> str:
"""simple docstring"""
super().setUp()
_UpperCamelCase = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self , **lowerCamelCase_ ) -> Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
_UpperCamelCase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
_UpperCamelCase = tokenizer.batch_encode_plus(lowerCamelCase_ )["input_ids"]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_=6 ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCamelCase = "This is a simple input"
_UpperCamelCase = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase = ("This is a simple input", "This is a pair")
_UpperCamelCase = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
_UpperCamelCase = None # Hotfixing padding = None
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" , )
def lowercase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = load_dataset("xnli" , "all_languages" , split="test" , streaming=lowerCamelCase_ )
_UpperCamelCase = next(iter(lowerCamelCase_ ) )["premise"] # pick up one data
_UpperCamelCase = list(sample_data.values() )
_UpperCamelCase = list(map(tokenizer.encode , lowerCamelCase_ ) )
_UpperCamelCase = [tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) for x in output_tokens]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( self ) -> Dict:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 147 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
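# Added sketch (not part of the original file) of the idea behind _LazyModule: resolve a
# name to its backing module only on first access. `lazy_map` is a hypothetical table.
def _lazy_import_demo(name: str):
    import importlib

    lazy_map = {"sqrt": "math", "dumps": "json"}
    return getattr(importlib.import_module(lazy_map[name]), name)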
| 469 | from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCamelCase ( module ):
for param in module.parameters():
        param.requires_grad = False
def __UpperCamelCase ( ):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __UpperCamelCase ( A ):
    fig = plt.imshow(A)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
plt.show()
def __UpperCamelCase ( ):
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
return timestamp
| 469 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return book data from Open Library for an olid such as 'isbn/0140328726'."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        raise ValueError(f"{olid} is not a valid Open Library olid")
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary as a dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
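# Added usage sketch (not part of the original file); it performs a live HTTP request,
# so it is left commented out:
#
#     book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#     print(book["Title"])  # expected: Roald Dahl's "Matilda"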
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.") | 156 | 0 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI args, generate outputs, and score them against the reference file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True) | 719 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute and log the WER/CER metrics for the decoded dataset."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
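# Added illustration (not part of the original script) of the normalization step above:
# strip the ignored punctuation, lowercase, and collapse whitespace before scoring.
def _normalize_demo() -> None:
    text = re.sub(r'[,?.!\-\;\:"“%‘”�—’…–]', "", "Hello, world! — okay?".lower())
    text = " ".join(text.split())
    assert text == "hello world okay"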
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args) | 338 | 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distance from src to every vertex, or raise on a negative cycle."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
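# Added worked example (not part of the original file): the two-hop path 0 -> 1 -> 2
# (total weight 7) beats the direct 0 -> 2 edge (weight 10).
def _bellman_ford_demo() -> None:
    edges = [
        {"src": 0, "dst": 1, "weight": 3},
        {"src": 1, "dst": 2, "weight": 4},
        {"src": 0, "dst": 2, "weight": 10},
    ]
    assert bellman_ford(edges, 3, 3, 0) == [0.0, 3.0, 7.0]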
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 284 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = SCREAMING_SNAKE_CASE__) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
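# Added alternative (not part of the original file): the same scan written with
# math.prod over a sliding 13-character slice.
def solution_prod(n: str = SCREAMING_SNAKE_CASE__) -> int:
    from math import prod

    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))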
if __name__ == "__main__":
    print(f"{solution() = }")
| 0 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : Any = logging.get_logger(__name__)
_A : str = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 701 |
"""simple docstring"""
_A : List[str] = 8.3_1_4_4_5_9_8
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28  # given in g/mol here; see the unit note below
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 518 | 0 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 596 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, '_converted_to_transformer_engine', False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> None:
PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port=None) -> bool:
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
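# Added usage sketch (not part of the original file) for the context manager above:
def _patch_environment_demo() -> None:
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ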
| 596 | 1 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
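# Added usage sketch (not part of the original file):
def _longest_subsequence_demo() -> None:
    assert longest_subsequence([1, 2, 3]) == [1, 2, 3]
    assert longest_subsequence([1, 3, 2, 4]) == [1, 2, 4]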
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40 |
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
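# Added alternative (not part of the original file): three-argument pow keeps every term
# reduced mod 10**10, avoiding the huge intermediate integers of i**i.
def solution_modular() -> str:
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)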
if __name__ == "__main__":
    print(solution())
| 40 | 1 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet ( hor ):
"""simple docstring"""
if hor == 128:
lowerCAmelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCAmelCase__ = (32, 128, 256)
lowerCAmelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCAmelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCAmelCase__ = (32, 64, 128, 256)
lowerCAmelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCAmelCase__ = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
lowerCAmelCase__ = model.state_dict()
lowerCAmelCase__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCAmelCase__ = UNetaDModel(**lowerCAmelCase__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowerCAmelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCAmelCase__ = state_dict.pop(lowerCAmelCase__ )
hf_value_function.load_state_dict(lowerCAmelCase__ )
torch.save(hf_value_function.state_dict() , f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCAmelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCAmelCase__ = model
lowerCAmelCase__ = UNetaDModel(**lowerCAmelCase__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowerCAmelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCAmelCase__ = state_dict.pop(lowerCAmelCase__ )
hf_value_function.load_state_dict(lowerCAmelCase__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 644 |
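Note that the mapping above pairs checkpoint keys with diffusers keys purely by iteration order, which breaks silently if the two modules register parameters in different orders. A defensive variant of the same idea, with a shape check added for illustration (remap_by_order is our name, not from the script):

def remap_by_order(src_state_dict, dst_state_dict):
    # Positional mapping assumes both modules register parameters in the same order;
    # comparing shapes catches the most common silent mismatches early.
    assert len(src_state_dict) == len(dst_state_dict), "state dicts differ in size"
    remapped = {}
    for (old_key, tensor), (new_key, ref) in zip(src_state_dict.items(), dst_state_dict.items()):
        assert tensor.shape == ref.shape, f"{old_key} -> {new_key}: {tuple(tensor.shape)} vs {tuple(ref.shape)}"
        remapped[new_key] = tensor
    return remapped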
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29 | 0 |
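A worked example of the renaming function from the script above, useful when eyeballing a conversion (the sample keys are illustrative):

print(rename_key("layers.0.blocks.1.mlp.fc1.weight"))
# -> focalnet.encoder.stages.0.layers.1.mlp.fc1.weight
print(rename_key("head.weight"))
# -> classifier.weight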
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 390 |


def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors


def solution() -> int:
    """Project Euler 12: find the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
| 390 | 1 |
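count_divisors above derives the divisor count from the prime factorization: the answer is the product of (multiplicity + 1) over all prime factors. A brute-force cross-check against that identity, assuming the restored names above (count_divisors_naive is a hypothetical helper, not from the original):

def count_divisors_naive(n: int) -> int:
    # O(sqrt(n)) enumeration of divisor pairs, used here only as a sanity check
    count = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            count += 2 if i * i != n else 1
        i += 1
    return count


for n in range(1, 1_000):
    assert count_divisors(n) == count_divisors_naive(n)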
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 33 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" ) | 52 | 0 |
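A short usage sketch of the rope_scaling validation above: the dict must contain exactly a type in {linear, dynamic} and a float factor greater than 1 (the concrete values below are examples):

from transformers import LlamaConfig

# accepted: linear RoPE scaling with factor 2.0
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

# rejected by _rope_scaling_validation:
# LlamaConfig(rope_scaling={"type": "ntk", "factor": 2.0})   # unknown type
# LlamaConfig(rope_scaling={"type": "linear", "factor": 1})  # factor must be a float > 1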
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 171 |
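The dispatch pattern above, where every subcommand registers its own subparser and attaches a func callback that main() later invokes, reduces to a few lines. A toy sketch with a hypothetical hello command (all names here are ours):

from argparse import ArgumentParser


def hello_command_parser(subparsers):
    # each command owns its subparser and binds its handler via set_defaults(func=...)
    parser = subparsers.add_parser("hello", help="print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))


def main():
    parser = ArgumentParser("demo", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    hello_command_parser(subparsers)

    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    args.func(args)


if __name__ == "__main__":
    main()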
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 171 | 1 |
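A toy check of jax_cosine_distance from the module above: rows are L2-normalized, so the result is a cosine-similarity matrix (the embedding values are made up for illustration):

import jax.numpy as jnp

emb = jnp.array([[1.0, 0.0], [0.0, 2.0]])
scores = jax_cosine_distance(emb, emb)
# identical rows score ~1.0, orthogonal rows ~0.0:
# [[1., 0.],
#  [0., 1.]]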
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 16 |
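The save/load round-trip these tests exercise in a temporary directory, reduced to its essentials (the concrete configuration values below are examples):

import tempfile

from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = UniPCMultistepScheduler.from_pretrained(tmpdirname)

assert reloaded.config.solver_type == "bh2"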
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=_SCREAMING_SNAKE_CASE ):
snake_case = ["speech"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ["""speech"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=_SCREAMING_SNAKE_CASE ):
snake_case = ["speech"]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(self , ["""speech"""] )
| 129 | 0 |
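The DummyObject pattern above exists so that importing transformers without optional backends still succeeds, while touching a backend-gated class fails with a clear error. A simplified, self-contained imitation (all names here are ours, not the library's):

class RequiresBackendMeta(type):
    # any class-level attribute access (e.g. from_pretrained) fails fast
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the 'speech' backend to be installed.")


class FakeFeatureExtractor(metaclass=RequiresBackendMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError("FakeFeatureExtractor requires the 'speech' backend to be installed.")


# FakeFeatureExtractor()                -> ImportError at construction
# FakeFeatureExtractor.from_pretrained  -> ImportError via the metaclass __getattr__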
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 370 |
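One detail worth calling out from the model above: its public interface is channels-first (NCHW, matching the PyTorch ControlNet), while Flax convolutions expect channels-last, hence the transposes at entry. The same layout change in isolation:

import jax.numpy as jnp

x_nchw = jnp.zeros((1, 4, 32, 32))
x_nhwc = jnp.transpose(x_nchw, (0, 2, 3, 1))
print(x_nhwc.shape)  # (1, 32, 32, 4)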
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key( dct, old, new ):
val = dct.pop(old )
dct[new] = val
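# The original checkpoints store each attention's q/k/v as one fused in_proj matrix; the HF
# models expect separate query/key/value tensors, so the two helpers below slice the fused
# weights apart. Target key names follow the HF naming visible in create_rename_keys above.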
def read_in_swin_q_k_v( state_dict, backbone_config ):
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v( state_dict, config ):
# fmt: off
hidden_size = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img( ) -> torch.Tensor:
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True ).raw )
return im
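# prepare_img fetches the standard COCO cats image that HF conversion scripts use as a quick
# forward-pass smoke test for the converted model.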
@torch.no_grad()
def convert_maskformer_checkpoint( model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False ):
config = get_maskformer_config(model_name )
# load original state_dict
with open(checkpoint_path, "rb" ) as f:
data = pickle.load(f )
state_dict = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict, src, dest )
read_in_swin_q_k_v(state_dict, config.backbone_config )
read_in_decoder_q_k_v(state_dict, config )
# update to torch tensors
for key, value in state_dict.items():
state_dict[key] = torch.from_numpy(value )
# load 🤗 model
model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
print(name, param.shape )
missing_keys , unexpected_keys = model.load_state_dict(state_dict, strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
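# ignore_index is the dataset-specific "void" label value; reduce_labels shifts ADE20k
# annotations down by one because ADE20k reserves label 0 for the unlabeled region.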
image = prepare_img()
if "vistas" in model_name:
ignore_index = 65
elif "cityscapes" in model_name:
ignore_index = 65_535
else:
ignore_index = 255
reduce_labels = True if "ade" in model_name else False
image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
inputs = image_processor(image, return_tensors="pt" )
outputs = model(**inputs )
print("Logits:", outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
expected_logits = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 370 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
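# DummyObject makes the placeholder class below raise a helpful ImportError (via
# requires_backends) whenever it is instantiated without the onnx backend installed.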
class a__ ( metaclass=DummyObject ):
_backends = ['onnx']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ["onnx"] )
@classmethod
def _lowerCamelCase ( cls , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
@classmethod
def _lowerCamelCase ( cls , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
| 245 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
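# Read-only fsspec filesystem over a Hugging Face Hub dataset repo: listings come from
# repo_info.siblings and file reads are streamed through hf_hub_url.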
class __a ( AbstractFileSystem ):
__UpperCamelCase : Any = ''
__UpperCamelCase : int = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Any ,repo_info : Optional[DatasetInfo] = None ,token : Optional[str] = None ,**lowerCamelCase : Dict ,):
'''simple docstring'''
super().__init__(self ,**lowerCamelCase )
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs( self : Optional[int] ):
'''simple docstring'''
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(d ): {"""name""": str(d ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _open( self : List[Any] ,path : str ,mode : str = "rb" ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
if not isinstance(self.repo_info ,DatasetInfo ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
url = hf_hub_url(self.repo_info.id ,path ,revision=self.repo_info.sha )
return fsspec.open(
url ,mode=mode ,headers=get_authentication_headers_for_url(url ,use_auth_token=self.token ) ,client_kwargs={"""trust_env""": True} ,).open()
def info( self : Tuple ,path : Any ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
self._get_dirs()
path = self._strip_protocol(path )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path )
def ls( self : str ,path : Any ,detail : bool=False ,**lowerCamelCase : Any ):
'''simple docstring'''
self._get_dirs()
path = PurePosixPath(path.strip("""/""" ) )
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("""/""" ) )
root = p.parent
if root == path:
paths[str(p )] = f
out = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 109 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
FRAMEWORK = 'pt'
elif is_tf_available():
FRAMEWORK = 'tf'
else:
FRAMEWORK = 'jax'
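# FRAMEWORK selects the return_tensors backend used by the batching tests below, depending
# on which deep-learning framework is installed in the environment.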
@require_sentencepiece
class MarianTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = MarianTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
def setUp( self ):
super().setUp()
vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
save_dir = Path(self.tmpdirname )
save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def get_tokenizer( self , **kwargs ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def test_convert_token_and_id( self ):
token = '''</s>'''
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(vocab_keys ) , 9 )
def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def test_tokenizer_equivalence_en_de( self ):
en_de_tokenizer = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
batch = en_de_tokenizer(['''I am a small frog'''] , return_tensors=FRAMEWORK )
self.assertIsInstance(batch , BatchEncoding )
expected = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(expected , batch.input_ids[0] )
save_dir = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(save_dir )
contents = [x.name for x in Path(save_dir ).glob('''*''' )]
self.assertIn('''source.spm''' , contents )
MarianTokenizer.from_pretrained(save_dir )
def test_outputs_not_longer_than_maxlen( self ):
tok = self.get_tokenizer()
batch = tok(
['''I am a small frog''' * 10_00, '''I am a small frog'''] , padding=True , truncation=True , return_tensors=FRAMEWORK )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def test_outputs_can_be_shorter( self ):
tok = self.get_tokenizer()
batch_smaller = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=True , return_tensors=FRAMEWORK )
self.assertIsInstance(batch_smaller , BatchEncoding )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def test_tokenizer_integration( self ):
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def test_separate_vocabs( self ):
tokenizer = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
source_text = '''Tämä on testi'''
target_text = '''This is a test'''
expected_src_ids = [76, 7, 20_47, 2]
expected_target_ids = [69, 12, 11, 9_40, 2]
src_ids = tokenizer(source_text ).input_ids
self.assertListEqual(src_ids , expected_src_ids )
target_ids = tokenizer(text_target=target_text ).input_ids
self.assertListEqual(target_ids , expected_target_ids )
decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
self.assertEqual(decoded , target_text )
| 407 |
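# AND gate: returns 1 only when both inputs are non-zero; the tuple trick simply counts how
# many of the two inputs are 0.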
def and_gate( input_a :int , input_b :int ):
return int((input_a, input_b).count(0 ) == 0 )
def test_and_gate( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 407 | 1 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
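# An entry of True whitelists every attribute of that config class; a list whitelists only
# the named attributes (see the `allowed_cases` check in check_attribute_being_used below).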
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
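# An attribute counts as "used" when any modeling_*.py source for the model references it as
# config.xxx or via getattr(config, "xxx", ...); everything else must be explicitly allowed.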
def check_attribute_being_used( config_class , attributes , default_value , source_strings ):
"""simple docstring"""
attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
attribute_used = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , modeling_source , )
is not None
):
attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
attributes_to_allow = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
case_allowed = True
if not attribute_used:
case_allowed = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
case_allowed = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
case_allowed = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
case_allowed = True
elif attribute.endswith("_token_id" ):
case_allowed = True
# configuration class specific cases
if not case_allowed:
allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used( config_class ):
"""simple docstring"""
signature = dict(inspect.signature(config_class.__init__ ).parameters )
parameter_names = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
parameter_defaults = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
reversed_attribute_map = {}
if len(config_class.attribute_map ) > 0:
reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
modeling_source_file = inspect.getsourcefile(config_class )
model_dir = os.path.dirname(modeling_source_file )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
# Get the source code strings
modeling_sources = []
for path in modeling_paths:
if os.path.isfile(path ):
with open(path ) as fp:
modeling_sources.append(fp.read() )
unused_attributes = []
for config_param, default_value in zip(parameter_names , parameter_defaults ):
# `attributes` here is all the variant names for `config_param`
attributes = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
unused_attributes.append(attributes[0] )
return sorted(unused_attributes )
def check_config_attributes( ):
"""simple docstring"""
configs_with_unused_attributes = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
config_classes_in_module = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
and issubclass(x , PretrainedConfig )
and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
unused_attributes = check_config_attributes_being_used(config_class )
if len(unused_attributes ) > 0:
configs_with_unused_attributes[config_class.__name__] = unused_attributes
if len(configs_with_unused_attributes ) > 0:
error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 620 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
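# Repackages a text-to-image unCLIP (Karlo) checkpoint as an image-variation pipeline: the
# text-to-image prior is dropped and a CLIP vision encoder is wired in as the conditioning source.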
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
_lowercase = parser.parse_args()
txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 342 | 0 |
"""simple docstring"""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
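# Each try/except block below degrades gracefully: when an optional backend is missing, dummy
# placeholder objects are re-exported in place of the real classes so imports never hard-fail.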
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
def prepare_config_and_inputs( self ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
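# Swin pyramid arithmetic used below: each of the len(depths)-1 patch-merging stages divides
# the token count by 4 and doubles the channel dimension, which is where expected_seq_len and
# expected_dim come from.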
def create_and_check_model( self , config , pixel_values , labels ):
'''simple docstring'''
model = SwinvaModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
'''simple docstring'''
model = SwinvaForMaskedImageModeling(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
config.num_channels = 1
model = SwinvaForMaskedImageModeling(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
'''simple docstring'''
config.num_labels = self.type_sequence_label_size
model = SwinvaForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
pipeline_model_mapping = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ):
'''simple docstring'''
self.model_tester = SwinvaModelTester(self )
self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
def test_config( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def test_model( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def test_inputs_embeds( self ):
'''simple docstring'''
pass
def test_model_common_attributes( self ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_attention_outputs( self ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["""output_attentions"""] = True
inputs_dict["""output_hidden_states"""] = False
config.return_dict = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.attentions
expected_num_attentions = len(self.model_tester.depths )
self.assertEqual(len(attentions ) , expected_num_attentions )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
window_size_squared = config.window_size**2
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.attentions
self.assertEqual(len(attentions ) , expected_num_attentions )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
out_len = len(outputs )
# Check attention is always last and order is fine
inputs_dict["""output_attentions"""] = True
inputs_dict["""output_hidden_states"""] = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
added_hidden_states = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states , len(outputs ) )
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions ) , expected_num_attentions )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
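# The helper below checks both hidden_states (sequence layout) and reshaped_hidden_states,
# which expose the same tensors in a (batch, channels, height, width) layout that can be
# flattened back to (batch, seq_len, channels).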
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> None:
        '''simple docstring'''
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
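    # Worked example for num_patches above (illustrative): an image_size of (32, 32) with patch_size
    # (4, 4) gives (32 // 4) * (32 // 4) = 64 patches, so hidden_states[0] has shape
    # (batch_size, 64, embed_dim).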
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
        model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 18 | 1 |
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    """simple docstring"""
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
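    # Worked check for the sets above (illustrative): the intersection is {"c", "d", "e"} (3 elements)
    # and the union has 8 distinct elements, so jaccard_similarity(set_a, set_b) == 3 / 8 == 0.375.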
| 216 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples( df : "pyspark.sql.DataFrame" , partition_order : List[int] , ):
"""simple docstring"""
import pyspark
    def generate_fn():
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
    def __init__( self , df : "pyspark.sql.DataFrame" , partition_order : Any=None , ):
        """simple docstring"""
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Union[str, Any] ):
"""simple docstring"""
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator : np.random.Generator ):
        """simple docstring"""
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id : int , num_workers : int ):
        """simple docstring"""
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df : "pyspark.sql.DataFrame" , cache_dir : str = None , working_dir : str = None , **config_kwargs , ):
        """simple docstring"""
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ):
"""simple docstring"""
        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , "a" )
            return [probe_file]
if self._spark.conf.get("spark.master","" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __UpperCamelCase ( self : Tuple,_A : datasets.download.download_manager.DownloadManager ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
        """simple docstring"""
        import pyspark
        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
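    # Sizing intuition for the logic above (hypothetical numbers): at roughly 1 KB per Arrow row, a
    # 10M-row DataFrame gives approx_total_size ~= 10 GB; with a max_shard_size of 500 MB the
    # DataFrame is repartitioned into min(10_000_000, 20) = 20 partitions, i.e. about one shard of
    # the target size per partition.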
    def _prepare_split_single( self , fpath : str , file_format : str , max_shard_size : int , ):
        """simple docstring"""
        import pyspark
        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator : "datasets.SplitGenerator" , file_format : str = "arrow" , max_shard_size : Optional[Union[str, int]] = None , num_proc : Optional[int] = None , **kwargs , ):
        """simple docstring"""
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f'Renaming {total_shards} shards.' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id : int , shard_id : int , global_shard_id : int , ):
                rename(
                    fs , fpath.replace("SSSSS" , f'{shard_id:05d}' ).replace("TTTTT" , f'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , f'{global_shard_id:05d}' ).replace("NNNNN" , f'{total_shards:05d}' ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id , num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , f'{shard_id:05d}' ).replace("TTTTT" , f'{task_id:05d}' ) , fpath.replace(SUFFIX , "" ) , )
def __UpperCamelCase ( self : List[str],_A : "datasets.SplitGenerator",):
"""simple docstring"""
return SparkExamplesIterable(self.df )
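    # Usage sketch (illustrative, assuming a local Spark master; in the public datasets API this
    # builder backs Dataset.from_spark):
    #
    #   import pyspark
    #   from datasets import Dataset
    #   spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
    #   df = spark.createDataFrame([(i, str(i)) for i in range(100)], "id: long, value: string")
    #   ds = Dataset.from_spark(df)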
| 216 | 1 |
'''simple docstring'''
from __future__ import annotations
class Node :
    def __init__( self , a_ ):
        self.data = a_
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree : Node | None ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree( tree : Node | None ):
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree : Node ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main( ): # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
main()
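# Worked example for the tree built in main() (illustrative shape):
#
#            1
#          /   \
#         2     3
#        / \   /
#       4   5 7
#          /  / \
#         6  8   9
#
# depth_of_tree counts the nodes on the longest root-to-leaf path (e.g. 1 -> 2 -> 5 -> 6), so it
# returns 4, and is_full_binary_tree returns False because nodes 3 and 5 each have exactly one child.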
| 713 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch: bool = field(
        default=False , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time: bool = field(
        default=False , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale: bool = field(
        default=False , metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train: bool = field(
        default=False , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int( a_ : Tuple ):
try:
int(a_ )
return True
except ValueError:
return False
def can_convert_to_float( a_ : int ):
try:
float(a_ )
return True
except ValueError:
return False
class Plot:
    def __init__( self , args ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="" ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
                if can_convert_to_int(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = int(row["result"] )
                elif can_convert_to_float(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = float(row["result"] )
    def plot( self ):
        fig , ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("log" )
ax.set_yscale("log" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"] ) )
            results = self.result_dict[model_name]["result"]
            (x_axis_array , inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label , inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , "--" )
                title_str += F''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
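# Illustrative input (hypothetical contents): the reader above expects the columns
# model, batch_size, sequence_length and result, e.g.
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1364
#   bert-base-uncased,8,512,2640
#
# where memory results are integers (MB) and time results are floats (s); a hypothetical run:
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png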
| 551 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    """bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
    """gpt2""": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks( args ) -> None:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
    parser = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__lowerCAmelCase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__lowerCAmelCase , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=__lowerCAmelCase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__lowerCAmelCase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__lowerCAmelCase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__lowerCAmelCase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__lowerCAmelCase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__lowerCAmelCase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=__lowerCAmelCase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowerCAmelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=__lowerCAmelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=__lowerCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__lowerCAmelCase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=__lowerCAmelCase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__lowerCAmelCase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__lowerCAmelCase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__lowerCAmelCase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__lowerCAmelCase , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__lowerCAmelCase , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__lowerCAmelCase , default=4000 , help='''Checkpoint interval.''' )
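    # Illustrative invocation (hypothetical paths; every flag below is defined by the parser above):
    #   python train.py --student_type distilbert --student_config training_configs/distilbert.json \
    #     --teacher_type bert --teacher_name bert-base-uncased --mlm --alpha_ce 5.0 --alpha_mlm 2.0 \
    #     --alpha_clm 0.0 --alpha_cos 1.0 --dump_path serialization_dir/my_first_distillation \
    #     --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle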
    args = parser.parse_args()
sanity_checks(__lowerCAmelCase )
# ARGS #
init_gpu_params(__lowerCAmelCase )
set_seed(__lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__lowerCAmelCase ) , __lowerCAmelCase , indent=4 )
git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F"""Special tokens {special_tok_ids}""" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
    with open(args.data_file , '''rb''' ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
        with open(args.token_counts , '''rb''' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F"""cuda:{args.local_rank}""" )
    logger.info('''Student loaded.''' )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 33 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A_ : int = logging.get_logger(__name__)
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _lowerCamelCase , )
        super().__init__(*_lowerCamelCase , **_lowerCamelCase )
| 57 | 0 |
"""simple docstring"""
def UpperCamelCase ( equation1 , equation2 ) ->tuple[float, float]:
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        else:
            raise ValueError('''No solution. (Inconsistent system)''' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
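# Worked example (illustrative): for x + 2y = 3 and 4x + 5y = 6, i.e. equations [1, 2, 3] and
# [4, 5, 6]:
#   determinant   = 1 * 5 - 4 * 2 = -3
#   determinant_x = 3 * 5 - 6 * 2 =  3   ->  x =  3 / -3 = -1.0
#   determinant_y = 1 * 6 - 4 * 3 = -6   ->  y = -6 / -3 =  2.0
# so UpperCamelCase([1, 2, 3], [4, 5, 6]) returns (-1.0, 2.0).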
| 558 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def a__ ( self ) -> Union[str, Any]:
        self.checkpoint = '''laion/clap-htsat-unfused'''
        self.tmpdirname = tempfile.mkdtemp()
def a__ ( self , **_lowercase ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **_lowercase )
def a__ ( self , **_lowercase ) -> str:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **_lowercase )
def a__ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> str:
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_feature_extractor()
_lowerCamelCase : Optional[int] = ClapProcessor(tokenizer=_lowercase , feature_extractor=_lowercase )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowercase )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : List[Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : str = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_lowerCamelCase : Dict = self.get_feature_extractor(do_normalize=_lowercase , padding_value=1.0 )
_lowerCamelCase : Optional[int] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowercase )
def a__ ( self ) -> int:
_lowerCamelCase : Any = self.get_feature_extractor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = ClapProcessor(tokenizer=_lowercase , feature_extractor=_lowercase )
_lowerCamelCase : List[Any] = floats_list((3, 1000) )
_lowerCamelCase : Optional[int] = feature_extractor(_lowercase , return_tensors='''np''' )
_lowerCamelCase : Optional[Any] = processor(audios=_lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : Dict = self.get_feature_extractor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[Any] = ClapProcessor(tokenizer=_lowercase , feature_extractor=_lowercase )
_lowerCamelCase : Dict = '''This is a test string'''
_lowerCamelCase : Dict = processor(text=_lowercase )
_lowerCamelCase : Any = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self ) -> List[str]:
_lowerCamelCase : List[Any] = self.get_feature_extractor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Tuple = ClapProcessor(tokenizer=_lowercase , feature_extractor=_lowercase )
_lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Dict = processor.batch_decode(_lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def a__ ( self ) -> List[Any]:
_lowerCamelCase : str = self.get_feature_extractor()
_lowerCamelCase : Any = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = ClapProcessor(tokenizer=_lowercase , feature_extractor=_lowercase )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
| 558 | 1 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , """words.txt""" )
    words = """"""
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
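    # Worked example (illustrative): "SKY" scores 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the
    # 10th triangular number, so "SKY" would be counted among the triangle words in words.txt.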
| 15 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1_0_2_4 )
print("Key files generation successful." )
def generate_key( key_size : int ):
    '''simple docstring'''
    print("Generating prime p..." )
    p = rabinMiller.generate_large_prime(key_size )
    print("Generating prime q..." )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print("Calculating d that is mod inverse of e..." )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files( name : str , key_size : int ):
    '''simple docstring'''
    if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
        print("\nWARNING:" )
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    public_key , private_key = generate_key(key_size )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
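    # Worked toy example (illustrative, far too small for real use): p = 61 and q = 53 give
    # n = 3233 and (p - 1) * (q - 1) = 3120; choosing e = 17 (gcd(17, 3120) == 1) yields d = 2753,
    # since 17 * 2753 = 46801 = 15 * 3120 + 1, so the key pair is ((3233, 17), (3233, 2753)).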
| 645 | 0 |
from __future__ import annotations
def make_matrix( row_size : int = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix( matrix : list[list[int]] ) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
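    # Worked example (illustrative): for [[1, 2], [3, 4]], transpose gives [[1, 3], [2, 4]] and
    # reverse_row turns that into [[2, 4], [1, 3]], the 90-degree counterclockwise rotation.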
| 707 |
def a_ ( __magic_name__ ) -> int:
    """simple docstring"""
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_str = str(abs(__magic_name__ ) )
        num_transpositions = [list(num_str ) for char in num_str]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
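    # Worked example (illustrative): for 152 the candidate numbers after dropping one digit are
    # 52, 12 and 15, so a_(152) returns 52.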
| 84 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , mask_ratio=0.9 , scope=None , ) -> List[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
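        # Worked example with the defaults above (illustrative): image_size 10 and patch_size 2 give
        # (10 // 2) ** 2 = 25 patches per frame; num_frames 2 with tubelet_size 2 gives one temporal
        # block, so seq_length = 25 and mask_ratio 0.9 masks int(0.9 * 25) = 22 of the 25 tokens.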
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict['''bool_masked_pos'''] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Any ) -> Optional[int]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _A ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _A ( self :str ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
@slow
def _A ( self :int ) -> List[str]:
'''simple docstring'''
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[Any] = True
for model_class in self.all_model_classes:
snake_case_ : Any = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ : Dict = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case_ : Optional[Any] = True
snake_case_ : List[str] = False
snake_case_ : str = True
snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : int = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also works using config
del inputs_dict["output_attentions"]
snake_case_ : Dict = True
snake_case_ : Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case_ : Any = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
snake_case_ : str = True
snake_case_ : List[str] = True
snake_case_ : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
snake_case_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _A ( self :int ) -> int:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ):
snake_case_ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Optional[int] = outputs.hidden_states
snake_case_ : Tuple = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
snake_case_ : Dict = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ : Optional[int] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
snake_case_ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
pass
def __UpperCAmelCase ( )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" ,filename="eating_spaghetti.npy" ,repo_type="dataset" )
snake_case_ : int = np.load(__magic_name__ )
return list(__magic_name__ )
@require_torch
@require_vision
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Tuple ) -> str:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : str = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : Union[str, Any] = prepare_video()
snake_case_ : List[str] = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
snake_case_ : Tuple = model(**lowerCAmelCase__ )
# verify the logits
snake_case_ : Dict = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
snake_case_ : Tuple = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(lowerCAmelCase__ )
snake_case_ : Tuple = self.default_image_processor
snake_case_ : Dict = prepare_video()
snake_case_ : Union[str, Any] = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# add boolean mask, indicating which patches to mask
snake_case_ : Optional[int] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
snake_case_ : Dict = torch.load(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
snake_case_ : Any = model(**lowerCAmelCase__ )
# verify the logits
snake_case_ : str = torch.Size([1, 1_408, 1_536] )
snake_case_ : List[Any] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=lowerCAmelCase__ )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
snake_case_ : Any = torch.tensor([0.5_1_4_2] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
snake_case_ : Tuple = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=lowerCAmelCase__ ).to(
lowerCAmelCase__ )
with torch.no_grad():
snake_case_ : int = model(**lowerCAmelCase__ )
snake_case_ : List[Any] = torch.tensor([0.6_4_6_9] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase__ , atol=1E-4 ) )
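# Hedged recap of the two loss checks above: with norm_pix_loss=True the reconstruction
# target is per-patch normalised pixels (expected loss 0.5142); reloading the same
# checkpoint with norm_pix_loss=False regresses on raw pixels instead (expected 0.6469).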
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square(__magic_name__ ,__magic_name__ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case_ : str = update_area_of_max_square(__magic_name__ ,col + 1 )
snake_case_ : Dict = update_area_of_max_square(row + 1 ,col + 1 )
snake_case_ : int = update_area_of_max_square(row + 1 ,__magic_name__ )
if mat[row][col]:
snake_case_ : str = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
return sub_problem_sol
else:
return 0
snake_case_ : Union[str, Any] = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
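# The plain recursion above re-solves overlapping subproblems, so it is exponential in
# rows * cols; it returns 2 for [[1, 1], [1, 1]] (the value tracked is the side length of
# the largest all-ones square). The memoised and iterative versions below are O(rows * cols).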
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
__magic_name__ ,__magic_name__ ,__magic_name__ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case_ : Dict = update_area_of_max_square_using_dp_array(__magic_name__ ,col + 1 ,__magic_name__ )
snake_case_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,__magic_name__ )
snake_case_ : Any = update_area_of_max_square_using_dp_array(row + 1 ,__magic_name__ ,__magic_name__ )
if mat[row][col]:
snake_case_ : int = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
snake_case_ : Optional[Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case_ : List[Any] = [0]
snake_case_ : Optional[int] = [[-1] * cols for _ in range(__magic_name__ )]
update_area_of_max_square_using_dp_array(0 ,0 ,__magic_name__ )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case_ : Dict = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : List[str] = dp_array[row][col + 1]
snake_case_ : Any = dp_array[row + 1][col + 1]
snake_case_ : Any = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case_ : Any = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : str = max(dp_array[row][col] ,__magic_name__ )
else:
snake_case_ : Optional[Any] = 0
return largest_square_area
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : str = [0] * (cols + 1)
snake_case_ : Tuple = [0] * (cols + 1)
snake_case_ : List[str] = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : Optional[Any] = current_row[col + 1]
snake_case_ : Optional[int] = next_row[col + 1]
snake_case_ : Dict = next_row[col]
if mat[row][col] == 1:
snake_case_ : Union[str, Any] = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Any = max(current_row[col] ,__magic_name__ )
else:
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
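# One more hedged check for any of the variants above (the obfuscated definitions all share
# one name, so the call below is illustrative rather than runnable as-is):
# largest_square_area_in_matrix_bottom_up(3, 3, [[1, 1, 1], [1, 1, 1], [0, 1, 1]])  # -> 2
# The zero at row 2, col 0 rules out a 3x3 square, so the best all-ones square is 2x2.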
| 653 | 1 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 712 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase ( lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
__lowerCAmelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCAmelCase = [4, 4, 4, 4]
__lowerCAmelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
else:
__lowerCAmelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCAmelCase = 96
elif "small" in model_name:
__lowerCAmelCase = 96
elif "base" in model_name:
__lowerCAmelCase = 1_28
elif "large" in model_name:
__lowerCAmelCase = 1_92
elif "xlarge" in model_name:
__lowerCAmelCase = 2_56
elif "huge" in model_name:
__lowerCAmelCase = 3_52
# set label information
__lowerCAmelCase = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
__lowerCAmelCase = "imagenet-22k-id2label.json"
else:
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = FocalNetConfig(
embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , )
return config
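# Illustrative (assumed) outcome of the helper above: model_name "focalnet-tiny-lrf" yields
# depths [2, 2, 6, 2], embed_dim 96, focal levels and windows both [3, 3, 3, 3], and the
# ImageNet-1k label mapping; "large"/"huge" checkpoints switch to ImageNet-22k labels.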
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowerCAmelCase = "encoder." + name
if "encoder.layers" in name:
__lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
__lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
__lowerCAmelCase = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
__lowerCAmelCase = "layernorm.weight"
if name == "norm.bias":
__lowerCAmelCase = "layernorm.bias"
if "head" in name:
__lowerCAmelCase = name.replace("head" , "classifier" )
else:
__lowerCAmelCase = "focalnet." + name
return name
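# Worked example of the renaming chain above (checkpoint key left, HF key right):
# "layers.0.blocks.1.modulation.f.weight"
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"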
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
__lowerCAmelCase = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
__lowerCAmelCase = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase )
__lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
__lowerCAmelCase = state_dict.pop(lowerCamelCase )
__lowerCAmelCase = val
__lowerCAmelCase = get_focalnet_config(lowerCamelCase )
__lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase )
# verify conversion
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = BitImageProcessor(
do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , )
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
__lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" )
__lowerCAmelCase = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
__lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 )
__lowerCAmelCase = model(**lowerCamelCase )
__lowerCAmelCase = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(f'''{model_name}''' )
processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 39 | 0 |
def lowerCamelCase__ ( lowercase = 100 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (n * (n + 1) // 2) ** 2
SCREAMING_SNAKE_CASE : Dict = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
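# Worked check (Project Euler 6): for n = 10 the square of the sum is 55**2 = 3025 and the
# sum of the squares is 385, a difference of 2640; the default n = 100 gives 25164150.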
if __name__ == "__main__":
print(F"""{solution() = }""")
| 62 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Optional[int] = """altclip_text_model"""
def __init__( self :Tuple , __lowercase :Dict=25_0002 , __lowercase :Union[str, Any]=1024 , __lowercase :Optional[int]=24 , __lowercase :List[Any]=16 , __lowercase :int=4096 , __lowercase :Union[str, Any]="gelu" , __lowercase :Optional[int]=0.1 , __lowercase :Optional[int]=0.1 , __lowercase :Union[str, Any]=514 , __lowercase :Dict=1 , __lowercase :int=0.02 , __lowercase :Optional[int]=0.02 , __lowercase :Optional[Any]=1e-0_5 , __lowercase :str=1 , __lowercase :Tuple=0 , __lowercase :List[str]=2 , __lowercase :str="absolute" , __lowercase :Tuple=True , __lowercase :Optional[int]=768 , **__lowercase :Union[str, Any] , ):
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__lowerCamelCase : int =vocab_size
__lowerCamelCase : int =hidden_size
__lowerCamelCase : Dict =num_hidden_layers
__lowerCamelCase : Optional[int] =num_attention_heads
__lowerCamelCase : str =hidden_act
__lowerCamelCase : Optional[Any] =intermediate_size
__lowerCamelCase : Tuple =hidden_dropout_prob
__lowerCamelCase : str =attention_probs_dropout_prob
__lowerCamelCase : Optional[int] =max_position_embeddings
__lowerCamelCase : Dict =type_vocab_size
__lowerCamelCase : Tuple =initializer_range
__lowerCamelCase : int =initializer_factor
__lowerCamelCase : List[Any] =layer_norm_eps
__lowerCamelCase : List[Any] =position_embedding_type
__lowerCamelCase : str =use_cache
__lowerCamelCase : str =project_dim
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : List[Any] = """altclip_vision_model"""
def __init__( self :Dict , __lowercase :Optional[int]=768 , __lowercase :Dict=3072 , __lowercase :Dict=512 , __lowercase :Optional[Any]=12 , __lowercase :Tuple=12 , __lowercase :Optional[int]=3 , __lowercase :Any=224 , __lowercase :List[Any]=32 , __lowercase :Optional[Any]="quick_gelu" , __lowercase :Optional[int]=1e-5 , __lowercase :List[Any]=0.0 , __lowercase :Dict=0.02 , __lowercase :Optional[int]=1.0 , **__lowercase :Dict , ):
super().__init__(**__lowercase )
__lowerCamelCase : Tuple =hidden_size
__lowerCamelCase : List[Any] =intermediate_size
__lowerCamelCase : int =projection_dim
__lowerCamelCase : Union[str, Any] =num_hidden_layers
__lowerCamelCase : Optional[int] =num_attention_heads
__lowerCamelCase : Tuple =num_channels
__lowerCamelCase : str =patch_size
__lowerCamelCase : str =image_size
__lowerCamelCase : str =initializer_range
__lowerCamelCase : Optional[int] =initializer_factor
__lowerCamelCase : int =attention_dropout
__lowerCamelCase : Dict =layer_norm_eps
__lowerCamelCase : Optional[Any] =hidden_act
@classmethod
def __lowercase ( cls :str , __lowercase :Union[str, os.PathLike] , **__lowercase :Dict ):
cls._set_token_in_kwargs(__lowercase )
__lowerCamelCase , __lowerCamelCase = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
__lowerCamelCase : str =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowercase , **__lowercase )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Dict = """altclip"""
__snake_case : Optional[Any] = True
def __init__( self :Any , __lowercase :Union[str, Any]=None , __lowercase :str=None , __lowercase :Tuple=768 , __lowercase :Any=2.6592 , **__lowercase :Any ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
__lowerCamelCase : Union[str, Any] =kwargs.pop('''text_config_dict''' , __lowercase )
__lowerCamelCase : List[str] =kwargs.pop('''vision_config_dict''' , __lowercase )
super().__init__(**__lowercase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__lowerCamelCase : Any ={}
# This is the complete result when using `text_config_dict`.
__lowerCamelCase : Any =AltCLIPTextConfig(**__lowercase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__lowerCamelCase : Dict =(
f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
f'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
__lowerCamelCase : Dict =(
f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
f'value `text_config["{key}"]` will be overriden.'
)
logger.warning(__lowercase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
__lowerCamelCase : int ={}
# This is the complete result when using `vision_config_dict`.
__lowerCamelCase : Dict =AltCLIPVisionConfig(**__lowercase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__lowerCamelCase : Any ={
str(__lowercase ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__lowerCamelCase : List[str] =(
f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
f'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
__lowerCamelCase : Union[str, Any] =(
f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
f'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(__lowercase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
__lowerCamelCase : Any ={}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
__lowerCamelCase : int ={}
logger.info('''`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.''' )
__lowerCamelCase : Any =AltCLIPTextConfig(**__lowercase )
__lowerCamelCase : Union[str, Any] =AltCLIPVisionConfig(**__lowercase )
__lowerCamelCase : Tuple =projection_dim
__lowerCamelCase : Tuple =logit_scale_init_value
__lowerCamelCase : Tuple =1.0
@classmethod
def __lowercase ( cls :Union[str, Any] , __lowercase :AltCLIPTextConfig , __lowercase :AltCLIPVisionConfig , **__lowercase :List[str] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowercase )
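# Hedged usage sketch (upstream these classes are AltCLIPTextConfig, AltCLIPVisionConfig and
# AltCLIPConfig, and this classmethod is from_text_vision_configs; here the names are obfuscated):
# clip_cfg = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
# clip_cfg.projection_dim  # -> 768 by default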
def __lowercase ( self :List[str] ):
__lowerCamelCase : int =copy.deepcopy(self.__dict__ )
__lowerCamelCase : int =self.text_config.to_dict()
__lowerCamelCase : int =self.vision_config.to_dict()
__lowerCamelCase : Optional[int] =self.__class__.model_type
return output
| 179 | 0 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__A : Dict = logging.getLogger(__name__)
__A : Any = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase_ )} , )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase : bool = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def snake_case ( self : Tuple ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__UpperCAmelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={"help": "The input training data file (a text file)."} )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , )
__UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , )
__UpperCAmelCase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
__UpperCAmelCase : Optional[int] = field(
default=5 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
__UpperCAmelCase : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=lowerCAmelCase_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase : float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
__UpperCAmelCase : bool = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
def snake_case ( self : int ):
if self.train_file is not None:
__lowercase : Optional[Any] = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__lowercase : Optional[int] = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->List[str]:
"""simple docstring"""
with open(_lowerCamelCase, "r", encoding="utf-8" ) as f:
__lowercase : str = [json.loads(_lowerCamelCase ) for line in f.read().splitlines() if (len(_lowerCamelCase ) > 0 and not line.isspace())]
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
__lowercase : Tuple = {c: dataset[c] for c in dataset.column_names}
__lowercase : List[str] = refs
return Dataset.from_dict(_lowerCamelCase )
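# The ref file consumed above is assumed to be JSON lines, one list per training line of the
# sub-token indices that continue a whole word, e.g.
# [2, 4, 5]
# [1]
# and must hold exactly one entry per non-empty dataset line (hence the length assert).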
def snake_case__ ( ) ->List[str]:
"""simple docstring"""
__lowercase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase ,__lowercase ,__lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase ,__lowercase ,__lowercase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowercase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", _lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowercase : List[str] = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowercase : Optional[int] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F'train[:{data_args.validation_split_percentage}%]', )
__lowercase : Tuple = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F'train[{data_args.validation_split_percentage}%:]', )
else:
__lowercase : Optional[Any] = {}
if data_args.train_file is not None:
__lowercase : Tuple = data_args.train_file
if data_args.validation_file is not None:
__lowercase : Optional[int] = data_args.validation_file
__lowercase : Optional[Any] = data_args.train_file.split("." )[-1]
if extension == "txt":
__lowercase : Union[str, Any] = "text"
__lowercase : Dict = load_dataset(_lowerCamelCase, data_files=_lowerCamelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase : Any = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase : Tuple = AutoConfig.from_pretrained(model_args.config_name, **_lowerCamelCase )
elif model_args.model_name_or_path:
__lowercase : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path, **_lowerCamelCase )
else:
__lowercase : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
__lowercase : Union[str, Any] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **_lowerCamelCase )
elif model_args.model_name_or_path:
__lowercase : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **_lowerCamelCase )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
__lowercase : List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=_lowerCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info("Training new model from scratch" )
__lowercase : Optional[int] = AutoModelForMaskedLM.from_config(_lowerCamelCase )
model.resize_token_embeddings(len(_lowerCamelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowercase : Optional[int] = datasets["train"].column_names
else:
__lowercase : Union[str, Any] = datasets["validation"].column_names
__lowercase : List[Any] = "text" if "text" in column_names else column_names[0]
__lowercase : List[str] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(_lowerCamelCase ):
# Remove empty lines
__lowercase : List[Any] = [line for line in examples["text"] if len(_lowerCamelCase ) > 0 and not line.isspace()]
return tokenizer(examples["text"], padding=_lowerCamelCase, truncation=_lowerCamelCase, max_length=data_args.max_seq_length )
__lowercase : Optional[int] = datasets.map(
_lowerCamelCase, batched=_lowerCamelCase, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the Chinese references if provided
if data_args.train_ref_file is not None:
__lowercase : Dict = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowercase : List[str] = add_chinese_references(
tokenized_datasets["validation"], data_args.validation_ref_file )
# If we have ref files, keep them from being removed by the trainer
__lowercase : Optional[int] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowercase : List[str] = False
# Data collator
# This one will take care of randomly masking the tokens.
__lowercase : Any = DataCollatorForWholeWordMask(tokenizer=_lowerCamelCase, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase : Any = Trainer(
model=_lowerCamelCase, args=_lowerCamelCase, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=_lowerCamelCase, data_collator=_lowerCamelCase, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowercase : str = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowercase : Union[str, Any] = model_args.model_name_or_path
else:
__lowercase : Optional[Any] = None
__lowercase : Any = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowercase : List[Any] = os.path.join(training_args.output_dir, "train_results.txt" )
if trainer.is_world_process_zero():
with open(_lowerCamelCase, "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json" ) )
# Evaluation
__lowercase : Tuple = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowercase : Union[str, Any] = trainer.evaluate()
__lowercase : str = math.exp(eval_output["eval_loss"] )
__lowercase : Optional[Any] = perplexity
__lowercase : Union[str, Any] = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(_lowerCamelCase, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
return results
def snake_case__ ( _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 281 |
"""simple docstring"""
import os
import sys
import unittest
__A : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers')
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : str ):
__lowercase : int = find_backend(" if not is_torch_available():" )
self.assertEqual(lowercase__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowercase : int = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(lowercase__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowercase : str = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(lowercase__ , "torch_and_transformers_and_onnx" )
def snake_case ( self : Any ):
__lowercase : List[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , lowercase__ )
self.assertIn("torch_and_transformers" , lowercase__ )
self.assertIn("flax_and_transformers" , lowercase__ )
self.assertIn("torch_and_transformers_and_onnx" , lowercase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def snake_case ( self : Dict ):
__lowercase : Tuple = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(lowercase__ , "\nCONSTANT = None\n" )
__lowercase : Union[str, Any] = create_dummy_object("function" , "'torch'" )
self.assertEqual(
lowercase__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
__lowercase : Tuple = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
__lowercase : Dict = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(lowercase__ , lowercase__ )
def snake_case ( self : List[Any] ):
__lowercase : Optional[int] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
__lowercase : List[str] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , lowercase__ )
| 281 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowercase ( ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = [randint(-1000 , 1000 ) for i in range(10 )]
snake_case : int = randint(-5000 , 5000 )
return (arr, r)
__lowercase : Optional[int] = make_dataset()
def lowercase ( __A : list[int] , __A : int ) -> Optional[int]:
'''simple docstring'''
for triplet in permutations(a_ , 3 ):
if sum(a_ ) == target:
return tuple(sorted(a_ ) )
return (0, 0, 0)
def lowercase ( __A : list[int] , __A : int ) -> Tuple:
'''simple docstring'''
arr.sort()
snake_case : Optional[Any] = len(a_ )
for i in range(n - 1 ):
snake_case , snake_case = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
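# Hedged trace of the two-pointer search above: with arr = [-2, 1, 3, 5] and target = 4,
# i = 0 fixes -2, then left = 1 and right = 3 give -2 + 1 + 5 == 4, so (-2, 1, 5) is
# returned; exhausting every i without a hit yields the sentinel (0, 0, 0).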
def lowercase ( ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = """\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"""
snake_case : Optional[int] = """\ntriplet_sum1(*dataset)\n"""
snake_case : Union[str, Any] = """\ntriplet_sum2(*dataset)\n"""
snake_case : int = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_0000 )
snake_case : List[str] = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_0000 )
return (min(a_ ), min(a_ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowercase : Optional[Any] = solution_times()
print(f'''The time for naive implementation is {times[0]}.''')
print(f'''The time for optimized implementation is {times[1]}.''')
| 36 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __lowercase ( __magic_name__ , unittest.TestCase ):
_a = PriorTransformer
_a = """hidden_states"""
@property
def UpperCamelCase__ ( self ) -> int:
__a = 4
__a = 8
__a = 7
__a = floats_tensor((batch_size, embedding_dim) ).to(UpperCamelCase )
__a = floats_tensor((batch_size, embedding_dim) ).to(UpperCamelCase )
__a = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase__ ( self , UpperCamelCase=0 ) -> Union[str, Any]:
torch.manual_seed(UpperCamelCase )
__a = 4
__a = 8
__a = 7
__a = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase )
__a = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase )
__a = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def UpperCamelCase__ ( self ) -> List[str]:
return (4, 8)
@property
def UpperCamelCase__ ( self ) -> str:
return (4, 8)
def UpperCamelCase__ ( self ) -> Dict:
__a = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
__a = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ) -> List[Any]:
__a , __a = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCamelCase )
__a = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def UpperCamelCase__ ( self ) -> Optional[Any]:
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.model_class(**UpperCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , UpperCamelCase )
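# The same introspection works for any nn.Module forward, e.g. (standard PyTorch API):
# list(inspect.signature(torch.nn.Linear(2, 2).forward).parameters)  # -> ['input']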
def UpperCamelCase__ ( self ) -> str:
__a = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
__a = model.to(UpperCamelCase )
if hasattr(UpperCamelCase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
__a = self.get_dummy_seed_input()
with torch.no_grad():
__a = model(**UpperCamelCase )[0]
__a = output[0, :5].flatten().cpu()
print(UpperCamelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__a = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(UpperCamelCase , UpperCamelCase , rtol=1e-2 ) )
@slow
class __lowercase ( unittest.TestCase ):
def UpperCamelCase__ ( self , UpperCamelCase=1 , UpperCamelCase=768 , UpperCamelCase=77 , UpperCamelCase=0 ) -> List[str]:
torch.manual_seed(UpperCamelCase )
__a = batch_size
__a = embedding_dim
__a = num_embeddings
__a = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase )
__a = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase )
__a = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase__ ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__a = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(UpperCamelCase )
__a = self.get_dummy_seed_input(seed=UpperCamelCase )
with torch.no_grad():
__a = model(**UpperCamelCase )[0]
assert list(sample.shape ) == [1, 768]
__a = sample[0, :8].flatten().cpu()
print(UpperCamelCase )
__a = torch.tensor(UpperCamelCase )
assert torch_all_close(UpperCamelCase , UpperCamelCase , atol=1e-3 )
| 539 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(UpperCamelCase__ , 2 ) - pow(UpperCamelCase__ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(UpperCamelCase__ , 2 ) - pow(UpperCamelCase__ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(UpperCamelCase__ , 2 ) + pow(UpperCamelCase__ , 2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
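# Illustrative sketch (an addition, not part of the original test file): both
# checks above rely on ESM numbering non-padding tokens from padding_idx + 1
# upwards while padding tokens keep padding_idx. A self-contained
# re-implementation of `create_position_ids_from_input_ids` under that assumption:
def _position_ids_sketch(input_ids, padding_idx):
    # 1 for real tokens, 0 for padding
    mask = input_ids.ne(padding_idx).int()
    # cumulative count of real tokens gives 1-based positions; shift past padding_idx
    return torch.cumsum(mask, dim=1) * mask + padding_idx
# _position_ids_sketch(torch.as_tensor([[12, 31, 13, 1]]), padding_idx=1) -> tensor([[2, 3, 4, 1]])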
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_snake_case = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class lowerCAmelCase_ ( _lowercase ):
"""simple docstring"""
UpperCAmelCase__ = "albert"
def __init__( self , _SCREAMING_SNAKE_CASE=30_000 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=4_096 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=16_384 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__UpperCamelCase = vocab_size
__UpperCamelCase = embedding_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_hidden_groups
__UpperCamelCase = num_attention_heads
__UpperCamelCase = inner_group_num
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = position_embedding_type
class lowerCAmelCase_ ( _lowercase ):
"""simple docstring"""
@property
def __lowercase( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
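# Reference sketch (an addition): the two classes above correspond to the public
# `transformers.AlbertConfig` and its ONNX config; the defaults in the signature
# (hidden_size=4096, num_attention_heads=64) are the xxlarge-sized defaults.
def _albert_config_sketch():
    from transformers import AlbertConfig

    config = AlbertConfig()  # same defaults as the signature above
    assert config.hidden_size == 4_096 and config.num_attention_heads == 64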
| 383 |
import gc
import threading
import time
import psutil
import torch
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Any:
__UpperCamelCase = psutil.Process()
__UpperCamelCase = False
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = -1
while True:
__UpperCamelCase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def __lowercase( self ) -> Dict:
__UpperCamelCase = True
__UpperCamelCase = threading.Thread(target=self.peak_monitor )
__UpperCamelCase = True
self.thread.start()
def __lowercase( self ) -> List[str]:
__UpperCamelCase = False
self.thread.join()
return self.cpu_memory_peak
_snake_case = PeakCPUMemory()
def _a ( ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = {'time': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__UpperCamelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__UpperCamelCase = torch.cuda.memory_allocated(__lowercase )
torch.cuda.reset_peak_memory_stats()
return measures
def _a ( __lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {'time': time.time() - start_measures['time']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__UpperCamelCase = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
__UpperCamelCase = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__UpperCamelCase = (torch.cuda.memory_allocated(__lowercase ) - start_measures[str(__lowercase )]) / 2**20
__UpperCamelCase = (torch.cuda.max_memory_allocated(__lowercase ) - start_measures[str(__lowercase )]) / 2**20
return measures
def _a ( __lowercase , __lowercase ) -> Any:
"""simple docstring"""
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__lowercase )]:.2f}MiB""" )
__UpperCamelCase = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
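# Usage sketch (an addition, with assumed deobfuscated names): upstream these
# helpers are `start_measure()`, `end_measure(start_measures)` and
# `log_measures(measures, description)`; here all three are obfuscated to `_a`,
# so the last definition shadows the others and they cannot be called as written.
# The intended pattern is:
#
#   start = start_measure()             # snapshot wall time, CPU RSS, per-GPU memory
#   run_workload()                      # placeholder for the code being profiled
#   measures = end_measure(start)       # deltas in seconds / MiB, plus CPU & GPU peaks
#   log_measures(measures, "workload")  # pretty-print the report as in the last helper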
| 383 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
A_ : Any = FunnelTokenizer
A_ : Dict = FunnelTokenizerFast
A_ : Dict = True
A_ : Tuple = True
def _A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : Dict = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _A ( self : Union[str, Any] , **a__ : str ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **a__ )
def _A ( self : Optional[int] , **a__ : str ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def _A ( self : Tuple , a__ : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = "UNwant\u00E9d,running"
lowerCAmelCase__ : Union[str, Any] = "unwanted, running"
return input_text, output_text
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ : List[str] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [7, 4, 5, 10, 8, 9] )
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
lowerCAmelCase__ : Tuple = tokenizer("UNwant\u00E9d,running" )
lowerCAmelCase__ : str = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
lowerCAmelCase__ : Optional[int] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
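# Reference sketch (an addition): the token-type behaviour tested above, via the
# public API and the released `funnel-transformer/small` checkpoint.
def _funnel_usage_sketch():
    from transformers import FunnelTokenizer

    tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
    encoded = tokenizer("UNwant\u00E9d,running")
    # Funnel marks the leading <cls> token with type id 2, the rest with 0
    assert encoded["token_type_ids"][0] == 2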
| 568 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
try:
lowerCAmelCase__ : Optional[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCAmelCase__ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
lowerCAmelCase__ : Tuple = strtobool(lowerCamelCase_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
snake_case = parse_flag_from_env("""RUN_SLOW""", default=False)
snake_case = parse_flag_from_env("""RUN_REMOTE""", default=False)
snake_case = parse_flag_from_env("""RUN_LOCAL""", default=True)
snake_case = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
snake_case = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
snake_case = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
snake_case = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
snake_case = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
snake_case = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
snake_case = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
snake_case = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
lowerCAmelCase__ : List[str] = unittest.skip("test requires faiss" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
lowerCAmelCase__ : Optional[int] = unittest.skip("test requires regex" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
lowerCAmelCase__ : List[Any] = unittest.skip("test requires elasticsearch" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
lowerCAmelCase__ : List[str] = unittest.skip("test requires sqlalchemy" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
lowerCAmelCase__ : List[Any] = unittest.skip("test requires PyTorch" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.TF_AVAILABLE:
lowerCAmelCase__ : Optional[int] = unittest.skip("test requires TensorFlow" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
lowerCAmelCase__ : Optional[Any] = unittest.skip("test requires JAX" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
lowerCAmelCase__ : int = unittest.skip("test requires Pillow" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def _require_spacy_model(lowerCamelCase_ ):
try:
import spacy # noqa F401
spacy.load(lowerCamelCase_ )
except ImportError:
return unittest.skip("test requires spacy" )(lowerCamelCase_ )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(lowerCamelCase_ ) )(lowerCamelCase_ )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
lowerCAmelCase__ : int = unittest.skip("test is slow" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
lowerCAmelCase__ : Tuple = unittest.skip("test is local" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
lowerCAmelCase__ : List[str] = unittest.skip("test is packaged" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
lowerCAmelCase__ : Union[str, Any] = unittest.skip("test requires remote" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( *lowerCamelCase_ ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(lowerCamelCase_ ) and name.startswith("test" ):
for decorator in decorators:
lowerCAmelCase__ : Optional[Any] = decorator(lowerCamelCase_ )
setattr(cls , lowerCamelCase_ , lowerCamelCase_ )
return cls
return decorate
class lowerCAmelCase ( UpperCamelCase_ ):
pass
class lowerCAmelCase ( UpperCamelCase_ ):
A_ : List[Any] = 0
A_ : int = 1
A_ : Any = 2
@contextmanager
def UpperCAmelCase_ ( lowerCamelCase_=OfflineSimulationMode.CONNECTION_FAILS , lowerCamelCase_=1e-1_6 ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = requests.Session().request
def timeout_request(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
# Change the url to an invalid url so that the connection hangs
lowerCAmelCase__ : Union[str, Any] = "https://10.255.255.1"
if kwargs.get("timeout" ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
lowerCAmelCase__ : Union[str, Any] = timeout
try:
return online_request(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
lowerCAmelCase__ : Union[str, Any] = url
lowerCAmelCase__ : List[Any] = e.args[0]
lowerCAmelCase__ : Tuple = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]''' ),)
lowerCAmelCase__ : str = (max_retry_error,)
raise
def raise_connection_error(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
raise requests.ConnectionError("Offline mode is enabled." , request=lowerCamelCase_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send" , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request" , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCamelCase_ ):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def UpperCAmelCase_ ( *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : str = str(Path().resolve() )
with tempfile.TemporaryDirectory(*lowerCamelCase_ , **lowerCamelCase_ ) as tmp_dir:
try:
os.chdir(lowerCamelCase_ )
yield
finally:
os.chdir(lowerCamelCase_ )
@contextmanager
def UpperCAmelCase_ ( ):
"""simple docstring"""
import gc
gc.collect()
lowerCAmelCase__ : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase_ ( ):
"""simple docstring"""
import gc
gc.collect()
lowerCAmelCase__ : Dict = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return deepcopy(lowerCamelCase_ ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(lowerCamelCase_ ).integers(0 , 1_0_0 , 1_0 ).tolist()
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ):
try:
return func(*lowerCamelCase_ , **lowerCamelCase_ )
except HTTPError as err:
if str(lowerCamelCase_ ).startswith("500" ) or str(lowerCamelCase_ ).startswith("502" ):
pytest.xfail(str(lowerCamelCase_ ) )
raise err
return decorator.decorator(_wrapper , lowerCamelCase_ )
class lowerCAmelCase :
def __init__( self : Optional[Any] , a__ : Any , a__ : Optional[Any] , a__ : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = returncode
lowerCAmelCase__ : str = stdout
lowerCAmelCase__ : Tuple = stderr
async def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
while True:
lowerCAmelCase__ : Optional[int] = await stream.readline()
if line:
callback(lowerCamelCase_ )
else:
break
async def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(lowerCamelCase_ ) )
lowerCAmelCase__ : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : str = []
def tee(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="" ):
lowerCAmelCase__ : int = line.decode("utf-8" ).rstrip()
sink.append(lowerCamelCase_ )
if not quiet:
print(lowerCamelCase_ , lowerCamelCase_ , file=lowerCamelCase_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda lowerCamelCase_ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stdout , label="stdout:" ) ),
_read_stream(p.stderr , lambda lowerCamelCase_ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stderr , label="stderr:" ) ),
] , timeout=lowerCamelCase_ , )
return _RunOutput(await p.wait() , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=1_8_0 , lowerCamelCase_=False , lowerCamelCase_=True ):
"""simple docstring"""
lowerCAmelCase__ : Any = asyncio.get_event_loop()
lowerCAmelCase__ : Any = loop.run_until_complete(
_stream_subprocess(lowerCamelCase_ , env=lowerCamelCase_ , stdin=lowerCamelCase_ , timeout=lowerCamelCase_ , quiet=lowerCamelCase_ , echo=lowerCamelCase_ ) )
lowerCAmelCase__ : Union[str, Any] = " ".join(lowerCamelCase_ )
if result.returncode > 0:
lowerCAmelCase__ : List[str] = "\n".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : int = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
lowerCAmelCase__ : Optional[Any] = re.sub(R"^gw" , "" , lowerCamelCase_ , 0 , re.M )
return int(lowerCamelCase_ )
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = 2_9_5_0_0
lowerCAmelCase__ : Optional[Any] = pytest_xdist_worker_id()
return port + uniq_delta
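# Usage sketch (an addition; upstream the obfuscated context manager above is
# named `offline`): the CONNECTION_FAILS branch simply patches
# `requests.Session.send` to raise, which can be reproduced standalone:
def _offline_sketch():
    from unittest.mock import patch

    import requests

    def raise_connection_error(self, request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=request)

    with patch("requests.Session.send", raise_connection_error):
        try:
            requests.get("https://huggingface.co", timeout=1)
        except requests.ConnectionError as err:
            print("offline simulation works:", err)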
| 568 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = DownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __A ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __A ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __A ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __A ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __A ( self : List[str] ) -> Optional[int]:
return super().get_dummy_input(include_encoder_hidden_states=__magic_name__ )
def __A ( self : str ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __A ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE_ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = SkipDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __A ( self : str ) -> int:
return super().get_dummy_input(include_skip_sample=__magic_name__ )
def __A ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __A ( self : Dict ) -> Optional[int]:
return super().get_dummy_input(include_skip_sample=__magic_name__ )
def __A ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = DownEncoderBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __A ( self : Optional[Any] ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=__magic_name__ )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = {
"in_channels": 32,
"out_channels": 32,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __A ( self : Union[str, Any] ) -> List[Any]:
return super().get_dummy_input(include_temb=__magic_name__ )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = {
"in_channels": 32,
"out_channels": 32,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = UNetMidBlockaD # noqa F405
lowerCamelCase__ = '''mid'''
def __A ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = {
"in_channels": 32,
"temb_channels": 128,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase__ = '''mid'''
def __A ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase__ = '''mid'''
@property
def __A ( self : str ) -> str:
return super().get_dummy_input(include_encoder_hidden_states=__magic_name__ )
def __A ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def __A ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = UpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : str ) -> Union[str, Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__magic_name__ )
def __A ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE_ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : int ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__magic_name__ )
def __A ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : int ) -> Union[str, Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__magic_name__ )
def __A ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : str ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__magic_name__ , include_encoder_hidden_states=__magic_name__ )
def __A ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 32
return init_dict, inputs_dict
def __A ( self : str ) -> int:
SCREAMING_SNAKE_CASE_ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__magic_name__ )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __A ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = SkipUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : List[Any] ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=__magic_name__ )
def __A ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE_ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : Union[str, Any] ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=__magic_name__ )
def __A ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = UpDecoderBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : Tuple ) -> List[str]:
return super().get_dummy_input(include_temb=__magic_name__ )
def __A ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = {"in_channels": 32, "out_channels": 32}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __A ( self : List[str] ) -> Tuple:
return super().get_dummy_input(include_temb=__magic_name__ )
def __A ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = {"in_channels": 32, "out_channels": 32}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__magic_name__ )
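# Reference sketch (an addition): the obfuscated names above map onto diffusers'
# 2D UNet blocks (`DownBlockaD` ~ `DownBlock2D`, `unet_ad_blocks` ~
# `unet_2d_blocks`). Assumed standalone usage of one such block:
def _down_block_sketch():
    import torch
    from diffusers.models.unet_2d_blocks import DownBlock2D

    block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
    sample = torch.randn(4, 32, 64, 64)  # (batch, channels, height, width)
    temb = torch.randn(4, 128)           # timestep embedding
    hidden_states, res_samples = block(sample, temb)
    print(hidden_states.shape)           # downsampled 2x: torch.Size([4, 32, 32, 32])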
| 140 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = job["started_at"]
SCREAMING_SNAKE_CASE_ = job["completed_at"]
SCREAMING_SNAKE_CASE_ = date_parser.parse(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = date_parser.parse(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = duration_in_min
return job_info
def a__ ( __UpperCamelCase , __UpperCamelCase=None ):
SCREAMING_SNAKE_CASE_ = None
if token is not None:
SCREAMING_SNAKE_CASE_ = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
SCREAMING_SNAKE_CASE_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
SCREAMING_SNAKE_CASE_ = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json()
SCREAMING_SNAKE_CASE_ = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(__UpperCamelCase ) for job in result["jobs"]} )
SCREAMING_SNAKE_CASE_ = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = requests.get(url + F'''&page={i + 2}''' , headers=__UpperCamelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(__UpperCamelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
A : Optional[Any] = parser.parse_args()
A : Optional[int] = get_job_time(args.workflow_run_id)
A : Tuple = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"{k}: {v['duration']}")
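# Example invocation (an addition; the script name and run id are placeholders):
#   python get_ci_job_time.py --workflow_run_id <WORKFLOW_RUN_ID>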
| 140 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _lowerCamelCase ( UpperCAmelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase_ )
def _lowerCamelCase ( UpperCAmelCase_ : Any ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
A__ = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase_, id=UpperCAmelCase_ )
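# Example invocation (an addition): the hooks above read a `--make-reports`
# option, presumably registered by `pytest_addoption_shared`, so reports can be
# produced with e.g.
#   python -m pytest tests/ --make-reports=run_tests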
| 562 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=sys.maxsize ) -> str:
A__ = "bilinear"
A__ = max_size
A__ = short_edge_length
def __call__( self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
A__ = []
for img in imgs:
A__ , A__ = img.shape[:2]
# later: provide list and randomly choose index for resize
A__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
A__ = size * 1.0 / min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if h < w:
A__ , A__ = size, scale * w
else:
A__ , A__ = scale * h, size
if max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) > self.max_size:
A__ = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = newh * scale
A__ = neww * scale
A__ = int(neww + 0.5 )
A__ = int(newh + 0.5 )
if img.dtype == np.uinta:
A__ = Image.fromarray(SCREAMING_SNAKE_CASE__ )
A__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
A__ = np.asarray(SCREAMING_SNAKE_CASE__ )
else:
A__ = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
A__ = nn.functional.interpolate(
SCREAMING_SNAKE_CASE__ , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE__ ).squeeze(0 )
img_augs.append(SCREAMING_SNAKE_CASE__ )
return img_augs
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ ) -> str:
A__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
A__ = cfg.INPUT.FORMAT
A__ = cfg.SIZE_DIVISIBILITY
A__ = cfg.PAD_VALUE
A__ = cfg.INPUT.MAX_SIZE_TEST
A__ = cfg.MODEL.DEVICE
A__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A__ = lambda SCREAMING_SNAKE_CASE__ : (x - self.pixel_mean) / self.pixel_std
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
A__ = tuple(max(SCREAMING_SNAKE_CASE__ ) for s in zip(*[img.shape for img in images] ) )
A__ = [im.shape[-2:] for im in images]
A__ = [
nn.functional.pad(
SCREAMING_SNAKE_CASE__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
return torch.stack(SCREAMING_SNAKE_CASE__ ), torch.tensor(SCREAMING_SNAKE_CASE__ )
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Optional[int]:
with torch.no_grad():
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = [images]
if single_image:
assert len(SCREAMING_SNAKE_CASE__ ) == 1
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(SCREAMING_SNAKE_CASE__ , images.pop(SCREAMING_SNAKE_CASE__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
SCREAMING_SNAKE_CASE__ , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
A__ = torch.tensor([im.shape[:2] for im in images] )
A__ = self.aug(SCREAMING_SNAKE_CASE__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
A__ = [self.normalizer(SCREAMING_SNAKE_CASE__ ) for x in images]
# now pad them to do the following operations
A__ , A__ = self.pad(SCREAMING_SNAKE_CASE__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
A__ = torch.true_divide(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCamelCase ( UpperCAmelCase_ : List[Any], UpperCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCamelCase ( UpperCAmelCase_ : List[str], UpperCAmelCase_ : Tuple[int, int] ) -> str:
"""simple docstring"""
assert torch.isfinite(UpperCAmelCase_ ).all(), "Box tensor contains infinite or NaN!"
A__ , A__ = box_size
tensor[:, 0].clamp_(min=0, max=UpperCAmelCase_ )
tensor[:, 1].clamp_(min=0, max=UpperCAmelCase_ )
tensor[:, 2].clamp_(min=0, max=UpperCAmelCase_ )
tensor[:, 3].clamp_(min=0, max=UpperCAmelCase_ )
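# Usage sketch (an addition): the two helpers above rescale (x1, y1, x2, y2)
# boxes back to the original resolution and clamp them inside the image:
def _box_postprocess_sketch():
    import torch

    boxes = torch.tensor([[10.0, 20.0, 700.0, 500.0]])
    scale_yx = torch.tensor([[0.5, 0.5]])  # per-image (y, x) scale factors
    boxes[:, 0::2] *= scale_yx[:, 1]       # scale x coordinates
    boxes[:, 1::2] *= scale_yx[:, 0]       # scale y coordinates
    h, w = 240, 320                        # original image size
    boxes[:, 0].clamp_(min=0, max=w)
    boxes[:, 1].clamp_(min=0, max=h)
    boxes[:, 2].clamp_(min=0, max=w)
    boxes[:, 3].clamp_(min=0, max=h)
    print(boxes)  # tensor([[  5.,  10., 320., 240.]])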
| 562 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _lowerCAmelCase :
def __init__(self , lowercase = "cpu" , lowercase = "openai/clip-vit-large-patch14" ):
A_ : Tuple = device
A_ : Union[str, Any] = CLIPTokenizerFast.from_pretrained(lowercase )
A_ : Any = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
A_ : List[Any] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
A_ : int = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A_ : Union[str, Any] = torchvision.transforms.Resize(224 )
A_ : Optional[int] = torchvision.transforms.CenterCrop(224 )
def _a (self , lowercase ):
A_ : Dict = self.resize(lowercase )
A_ : Union[str, Any] = self.center_crop(lowercase )
A_ : List[Any] = self.normalize(lowercase )
return images
def __call__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Any = self.tokenizer(text=lowercase , **lowercase )
A_ : Optional[int] = self.preprocess_img(lowercase )
A_ : int = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase=10 , lowercase=0.01 , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=False , lowercase=True , lowercase="image" , lowercase=True , lowercase=False , lowercase=False , lowercase=False , ):
super().__init__()
A_ : Optional[int] = None
A_ : str = device if device else get_device()
if vqgan:
A_ : List[Any] = vqgan
else:
A_ : str = load_vqgan(self.device , conf_path=lowercase , ckpt_path=lowercase )
self.vqgan.eval()
if clip:
A_ : List[Any] = clip
else:
A_ : List[str] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
A_ : Any = ProcessorGradientFlow(device=self.device )
A_ : Dict = iterations
A_ : List[Any] = lr
A_ : Optional[int] = log
A_ : Dict = make_grid
A_ : str = return_val
A_ : str = quantize
A_ : Optional[Any] = self.vqgan.decoder.z_shape
def _a (self , lowercase=None , lowercase=None , lowercase=5 , lowercase=True ):
A_ : Tuple = []
if output_path is None:
A_ : List[Any] = """./animation.gif"""
if input_path is None:
A_ : Tuple = self.save_path
A_ : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(lowercase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowercase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
A_ : List[str] = total_duration / len(lowercase )
A_ : Optional[Any] = [frame_duration] * len(lowercase )
if extend_frames:
A_ : Optional[int] = 1.5
A_ : List[str] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowercase ) )
imageio.mimsave(lowercase , lowercase , duration=lowercase )
print(F'gif saved to {output_path}' )
def _a (self , lowercase=None , lowercase=None ):
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
A_ : int = preprocess(Image.open(lowercase ) , target_image_size=256 ).to(self.device )
A_ : Tuple = preprocess_vqgan(lowercase )
A_, *A_ : int = self.vqgan.encode(lowercase )
return z
def _a (self , lowercase ):
A_ : Optional[Any] = self.latent.detach().requires_grad_()
A_ : Union[str, Any] = base_latent + transform_vector
if self.quantize:
A_, *A_ : Any = self.vqgan.quantize(lowercase )
else:
A_ : Any = trans_latent
return self.vqgan.decode(lowercase )
def _a (self , lowercase , lowercase , lowercase=None ):
A_ : Optional[Any] = self.clip_preprocessor(text=lowercase , images=lowercase , return_tensors="""pt""" , padding=lowercase )
A_ : Union[str, Any] = self.clip(**lowercase )
A_ : Union[str, Any] = clip_outputs.logits_per_image
if weights is not None:
A_ : Optional[Any] = similarity_logits * weights
return similarity_logits.sum()
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""] , lowercase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
A_ : str = self._get_clip_similarity(neg_prompts["""prompts"""] , lowercase , weights=neg_prompts["""weights"""] )
else:
A_ : Tuple = torch.tensor([1] , device=self.device )
A_ : Optional[Any] = -torch.log(lowercase ) + torch.log(lowercase )
return loss
def _a (self , lowercase , lowercase , lowercase ):
A_ : Tuple = torch.randn_like(self.latent , requires_grad=lowercase , device=self.device )
A_ : Optional[int] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A_ : List[str] = self._add_vector(lowercase )
A_ : Dict = loop_post_process(lowercase )
A_ : List[Any] = self._get_CLIP_loss(lowercase , lowercase , lowercase )
print("""CLIP loss""" , lowercase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowercase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _a (self , lowercase , lowercase , lowercase ):
wandb.init(reinit=lowercase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
A_ : str = Image.open(lowercase )
A_ : Union[str, Any] = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(lowercase ) )
def _a (self , lowercase ):
if not prompts:
return []
A_ : int = []
A_ : Any = []
if isinstance(lowercase , lowercase ):
A_ : Tuple = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowercase , (tuple, list) ):
A_ : Dict = prompt[0]
A_ : Tuple = float(prompt[1] )
elif ":" in prompt:
A_, A_ : Union[str, Any] = prompt.split(""":""" )
A_ : List[str] = float(lowercase )
else:
A_ : Dict = prompt
A_ : Optional[int] = 1.0
processed_prompts.append(lowercase )
weights.append(lowercase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase , device=self.device ),
}
def _a (self , lowercase , lowercase=None , lowercase=None , lowercase=True , lowercase=False , lowercase=True , lowercase=True , lowercase=None , ):
if image_path:
A_ : List[Any] = self._get_latent(lowercase )
else:
A_ : Dict = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowercase , lowercase , lowercase )
assert pos_prompts, "You must provide at least one positive prompt."
A_ : List[Any] = self.process_prompts(lowercase )
A_ : Dict = self.process_prompts(lowercase )
if save_final and save_path is None:
A_ : str = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
A_ : str = save_path + """_""" + get_timestamp()
os.makedirs(lowercase )
A_ : List[Any] = save_path
A_ : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowercase ) )
A_ : Optional[Any] = loop_post_process(lowercase )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase , lowercase , lowercase ) ):
if show_intermediate:
show_pil(lowercase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowercase )} )
if show_final:
show_pil(lowercase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
| 667 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        ) | 667 | 1 |
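A hedged, standalone sketch of the processor round trip the tests above exercise; it downloads the same tiny checkpoints from the Hub, so network access is required:

import numpy as np
from PIL import Image
from transformers import BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor

processor = InstructBlipProcessor(
    BlipImageProcessor(),
    GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
    BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
)
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']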
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Rewrite argparse's error for deprecated `--no_xxx` flags into a
        # message that points at the new `--no-xxx` spelling.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 488 |
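A hedged sketch of driving the same benchmark programmatically instead of via the CLI; it needs TensorFlow installed and downloads the named model on first run:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8], inference=True
)
results = TensorFlowBenchmark(args=benchmark_args).run()
print(results)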
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 488 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 100 |
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 385 | 0 |
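A quick round trip through the two Morse helpers defined above:

message = "SOS 2024"
encoded = encrypt(message)
print(encoded)  # ... --- ... / ..--- ----- ..--- ....-
assert decrypt(encoded) == message.upper()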
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c of a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
| 514 |
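Sanity check for solution() above against the classic 3-4-5 right triangle, whose perimeter is 12:

assert solution(12) == 3 * 4 * 5  # the only Pythagorean triplet with a + b + c == 12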
def count_inversions_bf(arr):
    """O(n^2) brute-force inversion count."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Divide-and-conquer inversion count; returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversions (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 514 | 1 |
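Spot check: the divide-and-conquer counter above must agree with the O(n^2) brute force:

sample = [3, 1, 2]  # inversions: (3, 1) and (3, 2)
_, fast = count_inversions_recursive(sample)
assert fast == count_inversions_bf(sample) == 2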
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 105 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 318 | 0 |
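The parser above can also be driven programmatically, which is handy for checking the command wiring (the file name below is just an example):

parser = test_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
print(args.config_file)  # my_config.yaml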
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 716 |
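A hedged usage sketch for the tokenizer under test; from_pretrained fetches the sentencepiece files from the Hub on first use:

from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(["A long paragraph for summarization."], return_tensors="pt")
print(batch.input_ids.shape)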
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the list of `__init__` parameters of `config_class` that are unused in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 343 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint's weights into our RobertaPreLayerNorm structure."""
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 152 |
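Standalone illustration of the key-rewrite loop in the conversion function above, run on a toy state dict:

toy_state_dict = {
    "roberta.embeddings.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,  # dropped below
}
converted = {}
for key, value in toy_state_dict.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue
    converted[key] = value
print(converted)  # {'roberta_prelayernorm.embeddings.weight': 1}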
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A colouring is valid when no neighbour already carries the candidate colour.
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
return []
| 489 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '''
'''\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
elif self.config_name == "stsb":
return pearson_and_spearman(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '''
'''\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]''' )
| 706 |
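The helper functions above can also be called directly, without going through datasets.load_metric:

import numpy as np

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
print(simple_accuracy(preds, labels))  # 0.75
print(acc_and_f1(preds, labels))  # {'accuracy': 0.75, 'f1': 0.666...}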
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that, by default, only displays on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except the local main one.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 643 | 0 |
"""simple docstring"""
from math import ceil
def lowercase ( lowerCAmelCase__ = 1_001 ):
lowerCamelCase_ = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
lowerCamelCase_ = 2 * i + 1
lowerCamelCase_ = 2 * i
lowerCamelCase_ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 29 |
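Cross-check for solution() above: walk the spiral corners explicitly and compare on a 5x5 grid, whose diagonal sum is 101:

def spiral_diagonal_sum_bruteforce(n: int) -> int:
    total, value = 1, 1
    for side in range(3, n + 1, 2):
        for _ in range(4):  # the four corners of each ring
            value += side - 1
            total += value
    return total


assert solution(5) == spiral_diagonal_sum_bruteforce(5) == 101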
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 144 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
UpperCamelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCamelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCamelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCamelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCamelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCamelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCamelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCamelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCamelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCamelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCamelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCamelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCamelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCamelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCamelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 387 | import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : str = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
lowerCamelCase_ : Optional[int] = (
{"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : Tuple = False
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : Dict = False
lowerCamelCase_ : Optional[Any] = False
def __UpperCAmelCase( self ):
__A : Any = CvtModelTester(self )
__A : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def __UpperCAmelCase( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase( self ):
return
@unittest.skip(reason="Cvt does not output attentions" )
def __UpperCAmelCase( self ):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __UpperCAmelCase( self ):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __UpperCAmelCase( self ):
pass
def __UpperCAmelCase( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[Any] = model_class(__UpperCAmelCase )
__A : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : str = [*signature.parameters.keys()]
__A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase( self ):
pass
@slow
def __UpperCAmelCase( self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : int = CvtModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowerCamelCase_ ( ) -> Dict:
__A : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
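# The suite above is a standard unittest module; in the transformers repo layout
# (file path assumed) the slow integration test runs with the RUN_SLOW flag:
#   RUN_SLOW=1 pytest tests/models/cvt/test_modeling_cvt.py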
| 387 | 1 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
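
# Cross-check for solution() above using string conversion; the helper name
# digit_sum_via_str is illustrative, not part of the original module.
def digit_sum_via_str(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))


assert digit_sum_via_str(15) == 26  # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26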
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 591 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector supporting elementwise arithmetic and dot products."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return a vector of the given dimension with all components 0."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index ``pos`` and 0 elsewhere."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y (the BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of size n with random integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A w x h matrix supporting addition, subtraction, scalar and matrix-vector
    multiplication, minors, cofactors and determinants."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return an n x n matrix of zeros."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a height x width matrix with random integer entries in [a, b]."""
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
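
# Minimal usage sketch for the Vector and Matrix classes above.
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)  # (5,7,9)
    print(v * w)  # dot product: 32
    m = Matrix([[1, 0], [0, 1]], 2, 2)
    print(m.determinant())  # 1
    print(m * Vector([3, 4]))  # identity times vector: (3,4)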
| 591 | 1 |
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
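
# Sanity checks for different_signs() above: the XOR of two ints is negative
# exactly when their sign bits differ.
assert different_signs(1, -1) is True
assert different_signs(-5, -7) is False
assert different_signs(0, 5) is False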
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
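
# Hedged sketch of the deprecation path above: constructing the old class still
# works but emits a FutureWarning and otherwise behaves like the new processor
# (default processor arguments assumed valid).
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        MobileViTFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)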
| 399 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        tokenizer_file=None, src_lang=None, tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
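
# Usage sketch for the tokenizer above (assumes the published
# facebook/mbart-large-en-ro checkpoint is downloadable):
if __name__ == "__main__":
    tokenizer = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    encoded = tokenizer("Hello, world!", return_tensors="pt")
    # Per set_src_lang_special_tokens above, input_ids end with </s> en_XX.
    print(encoded["input_ids"])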
| 439 |
'''simple docstring'''
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0-1 knapsack: best total value using items[index:] within max_weight."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
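
# Worked example for knapsack() above (item data illustrative): with weights
# [1, 4, 5], values [3, 5, 7] and capacity 7, the best choice is the weight-1
# and weight-5 items, for a total value of 3 + 7 = 10.
assert knapsack([1, 4, 5], [3, 5, 7], 3, 7, 0) == 10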
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a causal LM head."""

    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
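
# Hedged usage sketch for the pipeline above (assumes the transformers
# `pipeline` factory and the public gpt2 checkpoint are available):
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    out = generator("Hello, I'm a language model,", max_new_tokens=20)
    print(out[0]["generated_text"])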
| 707 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq X-MOD checkpoint's weights into the transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
lowerCamelCase__: Optional[int] =xmod.model.encoder.sentence_encoder
lowerCamelCase__: Tuple =XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase__: Optional[Any] =xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , __a )
lowerCamelCase__: Tuple =XmodForSequenceClassification(__a ) if classification_head else XmodForMaskedLM(__a )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase__: Any =xmod_sent_encoder.embed_tokens.weight
lowerCamelCase__: List[Any] =xmod_sent_encoder.embed_positions.weight
lowerCamelCase__: Any =torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase__: List[Any] =xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase__: Union[str, Any] =xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase__: List[Any] =model.roberta.encoder.layer[i]
lowerCamelCase__: Union[str, Any] =xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase__: Any =layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
lowerCamelCase__: List[str] =xmod_layer.self_attn.q_proj.weight
lowerCamelCase__: Any =xmod_layer.self_attn.q_proj.bias
lowerCamelCase__: Any =xmod_layer.self_attn.k_proj.weight
lowerCamelCase__: Tuple =xmod_layer.self_attn.k_proj.bias
lowerCamelCase__: Optional[int] =xmod_layer.self_attn.v_proj.weight
lowerCamelCase__: List[str] =xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase__: Optional[int] =layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
lowerCamelCase__: Dict =xmod_layer.self_attn.out_proj.weight
lowerCamelCase__: Optional[Any] =xmod_layer.self_attn.out_proj.bias
lowerCamelCase__: List[Any] =xmod_layer.self_attn_layer_norm.weight
lowerCamelCase__: Dict =xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase__: Optional[Any] =layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
lowerCamelCase__: int =xmod_layer.fca.weight
lowerCamelCase__: List[str] =xmod_layer.fca.bias
# output
lowerCamelCase__: str =layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
lowerCamelCase__: Optional[Any] =xmod_layer.fca.weight
lowerCamelCase__: int =xmod_layer.fca.bias
lowerCamelCase__: List[str] =xmod_layer.final_layer_norm.weight
lowerCamelCase__: List[Any] =xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase__: Tuple =xmod_layer.adapter_layer_norm.weight
lowerCamelCase__: List[str] =xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase__: Optional[int] =bert_output.adapter_modules[lang_code]
lowerCamelCase__: Optional[int] =xmod_layer.adapter_modules[lang_code]
lowerCamelCase__: Any =from_adapter.fca.weight
lowerCamelCase__: Tuple =from_adapter.fca.bias
lowerCamelCase__: Optional[Any] =from_adapter.fca.weight
lowerCamelCase__: Optional[int] =from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase__: Tuple =xmod_sent_encoder.layer_norm.weight
lowerCamelCase__: Dict =xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase__: List[Any] =xmod.model.classification_heads["mnli"].dense.weight
lowerCamelCase__: int =xmod.model.classification_heads["mnli"].dense.bias
lowerCamelCase__: List[str] =xmod.model.classification_heads["mnli"].out_proj.weight
lowerCamelCase__: Dict =xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
lowerCamelCase__: Tuple =xmod.model.encoder.lm_head.dense.weight
lowerCamelCase__: int =xmod.model.encoder.lm_head.dense.bias
lowerCamelCase__: List[Any] =xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase__: str =xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase__: str =xmod.model.encoder.lm_head.weight
lowerCamelCase__: str =xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase__: List[str] =xmod.encode(__a ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__a )
lowerCamelCase__: List[Any] =model(__a )[0]
if classification_head:
lowerCamelCase__: Union[str, Any] =xmod.model.classification_heads["mnli"](xmod.extract_features(__a ) )
else:
lowerCamelCase__: Dict =xmod.model(__a , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase__: Optional[int] =torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase__: Tuple =torch.allclose(__a , __a , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(__a ).mkdir(parents=__a , exist_ok=__a )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
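# Example invocation of the conversion script above (script name and paths are
# placeholders, not taken from this file):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/checkpoint/model.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head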
| 437 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq X-MOD checkpoint's weights into the transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
lowercase = xmod.model.encoder.sentence_encoder
lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , lowerCAmelCase__ )
lowercase = XmodForSequenceClassification(lowerCAmelCase__ ) if classification_head else XmodForMaskedLM(lowerCAmelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase = xmod_sent_encoder.embed_tokens.weight
lowercase = xmod_sent_encoder.embed_positions.weight
lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowercase = xmod_sent_encoder.layernorm_embedding.weight
lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase = model.roberta.encoder.layer[i]
lowercase = xmod_sent_encoder.layers[i]
# self attention
lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
lowercase = xmod_layer.self_attn.q_proj.weight
lowercase = xmod_layer.self_attn.q_proj.bias
lowercase = xmod_layer.self_attn.k_proj.weight
lowercase = xmod_layer.self_attn.k_proj.bias
lowercase = xmod_layer.self_attn.v_proj.weight
lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
lowercase = xmod_layer.self_attn.out_proj.weight
lowercase = xmod_layer.self_attn.out_proj.bias
lowercase = xmod_layer.self_attn_layer_norm.weight
lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
lowercase = xmod_layer.fca.weight
lowercase = xmod_layer.fca.bias
# output
lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
lowercase = xmod_layer.fca.weight
lowercase = xmod_layer.fca.bias
lowercase = xmod_layer.final_layer_norm.weight
lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowercase = xmod_layer.adapter_layer_norm.weight
lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowercase = bert_output.adapter_modules[lang_code]
lowercase = xmod_layer.adapter_modules[lang_code]
lowercase = from_adapter.fca.weight
lowercase = from_adapter.fca.bias
lowercase = from_adapter.fca.weight
lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowercase = xmod_sent_encoder.layer_norm.weight
lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowercase = xmod.model.classification_heads['mnli'].dense.weight
lowercase = xmod.model.classification_heads['mnli'].dense.bias
lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowercase = xmod.model.encoder.lm_head.dense.weight
lowercase = xmod.model.encoder.lm_head.dense.bias
lowercase = xmod.model.encoder.lm_head.layer_norm.weight
lowercase = xmod.model.encoder.lm_head.layer_norm.bias
lowercase = xmod.model.encoder.lm_head.weight
lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase = xmod.encode(lowerCAmelCase__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(lowerCAmelCase__ )
lowercase = model(lowerCAmelCase__ )[0]
if classification_head:
lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(lowerCAmelCase__ ) )
else:
lowercase = xmod.model(lowerCAmelCase__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
lowercase = torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(lowerCAmelCase__ ).mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 359 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())

    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 511 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Create all n-grams of size ``ngram_size`` from ``sentence``."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
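
# Quick checks for create_ngram() above (character n-grams):
assert create_ngram("abcde", 2) == ["ab", "bc", "cd", "de"]
assert create_ngram("abc", 3) == ["abc"]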
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    """Corpus-level Google BLEU (GLEU), computed via NLTK's gleu_score."""

    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        } | 26 | 1 |
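The `_compute` method is a thin wrapper over NLTK. A minimal standalone sketch of the same call outside the `datasets` harness (assuming `nltk` is installed; token lists taken from the docstring examples):

from nltk.translate import gleu_score

hyp = ["he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history"]
ref = ["he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book"]

# corpus_gleu expects one list of candidate references per hypothesis
print(gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4))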
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase__ : int = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase__ : Tuple = model_class(A )
model.to(A )
model.train()
UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase__ : Optional[int] = model(**A ).loss
loss.backward()
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[Any] = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase__ : Tuple = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase__ : Optional[Any] = model(**A ).loss
loss.backward()
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(A )
UpperCAmelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __lowercase ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(A : Optional[Any] ,A : Union[str, Any] ,A : str ):
UpperCAmelCase__ : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : int = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : List[str] = self.model_tester.num_stages
self.assertEqual(len(A ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(A ,A ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
@cached_property
def __lowercase ( self : int ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase__ : Any = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : List[Any] = preprocessor(images=A ,return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**A )
# verify the logits
UpperCAmelCase__ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1e-4 ) )
| 65 |
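Outside the test harness, the inference path the integration test above exercises looks roughly like this (a sketch using the `facebook/convnextv2-tiny-1k-224` checkpoint and fixture image from the tests; note the library's released class is spelled ConvNextV2ForImageClassification):

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000) ImageNet logits
print(model.config.id2label[logits.argmax(-1).item()])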
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method.

    Returns the interpolated value at x0 together with the full Neville table.
    """
    n = len(x_points)
    # q[j][i]: value at x0 of the interpolating polynomial through points j - i + 1 .. j
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 0 |
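A quick numerical check of the routine above (using the `neville_interpolate` name restored here): points sampled from y = x² must be reproduced exactly by the interpolant.

value, table = neville_interpolate([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 2.5)
print(value)  # 6.25 == 2.5 ** 2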
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def __lowerCAmelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to resize to."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
__A = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'size' ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
snake_case_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
snake_case_ = self.image_processing_class(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase , do_rescale=__UpperCamelCase )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
snake_case_ = image_processing_a.pad(__UpperCamelCase , return_tensors='pt' )
snake_case_ = image_processing_a(__UpperCamelCase , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {'image_id': 3_97_69, 'annotations': target}
# encode them
snake_case_ = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
snake_case_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCamelCase ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCamelCase ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCamelCase ) )
# verify class_labels
snake_case_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCamelCase ) )
# verify orig_size
snake_case_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCamelCase ) )
# verify size
snake_case_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCamelCase ) )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
snake_case_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case_ = YolosImageProcessor(format='coco_panoptic' )
snake_case_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCamelCase ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCamelCase ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCamelCase ) )
# verify class_labels
snake_case_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCamelCase ) )
# verify masks
snake_case_ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCamelCase )
# verify orig_size
snake_case_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCamelCase ) )
# verify size
snake_case_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCamelCase ) )
| 46 |
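For context, a sketch of the end-to-end detection flow these processor tests back (assumes a recent transformers with `post_process_object_detection`; checkpoint and image path taken from the tests above):

import torch
from PIL import Image
from transformers import YolosForObjectDetection, YolosImageProcessor

processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale boxes back to the original (height, width) and keep confident detections.
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=torch.tensor([image.size[::-1]])
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} at {box.tolist()}")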
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ logging; this is why `os` is imported above
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 46 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 |
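The `_import_structure`/`_LazyModule` pattern above defers the heavy model imports until an attribute is first accessed. A minimal standalone sketch of the same idea using PEP 562 module `__getattr__` (ours, with hypothetical submodule names; not transformers' actual `_LazyModule`):

# mypackage/__init__.py
import importlib

_import_structure = {"configuration": ["Config"], "modeling": ["Model"]}
_attr_to_submodule = {attr: sub for sub, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Invoked only when `name` is not found the normal way (PEP 562, Python 3.7+).
    if name in _attr_to_submodule:
        module = importlib.import_module(f".{_attr_to_submodule[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")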
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 0 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub) | 713 |
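Unlike `from_pretrained`, `from_config` builds a freshly (randomly) initialized model without downloading weights. If desired, a one-line sanity check could be appended to the script above (`num_params` is our name):

num_params = sum(p.numel() for p in model.parameters())
print(f"initialized {model.config.model_type} with {num_params / 1e6:.1f}M parameters")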
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw')) | 267 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Backtracking: extend current_sequence with each unused element, recurse, then undo the choice."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 149 |
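As a quick cross-check of the backtracking above, a collecting variant (ours, not part of the original file) must agree with `itertools.permutations`:

from itertools import permutations


def collect_permutations(sequence):
    # Same backtracking as create_state_space_tree, but collecting instead of printing.
    results = []

    def walk(current, used):
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                walk(current, used)
                current.pop()
                used[i] = False

    walk([], [False] * len(sequence))
    return results


assert sorted(collect_permutations([3, 1, 2, 4])) == sorted(permutations([3, 1, 2, 4]))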
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
_overwrite_items = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
_delete_items = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
_access_absent_items = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names | 149 | 1 |
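The test above is a differential test: every operation is replayed against both `HashMap` and a plain `dict`, and the observable state must agree. The same pattern, shown standalone with two dicts using the helpers defined above:

if __name__ == "__main__":
    ops = [_set("a", 1), _set("b", 2), _del("a"), _get("a"), _get("b")]
    left, right = {}, {}
    for fun, *args in ops:
        left_res, _ = _run_operation(left, fun, *args)
        right_res, _ = _run_operation(right, fun, *args)
        assert left_res == right_res
    print("dict-vs-dict differential replay agrees")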
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
def __init__( self , UpperCamelCase__ = "cpu" , UpperCamelCase__ = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
A__ = device
A__ = CLIPTokenizerFast.from_pretrained(UpperCamelCase__ )
A__ = [0.4814_5466, 0.457_8275, 0.4082_1073]
A__ = [0.2686_2954, 0.2613_0258, 0.2757_7711]
A__ = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A__ = torchvision.transforms.Resize(2_24 )
A__ = torchvision.transforms.CenterCrop(2_24 )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.resize(UpperCamelCase__ )
A__ = self.center_crop(UpperCamelCase__ )
A__ = self.normalize(UpperCamelCase__ )
return images
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ):
'''simple docstring'''
A__ = self.tokenizer(text=UpperCamelCase__ , **UpperCamelCase__ )
A__ = self.preprocess_img(UpperCamelCase__ )
A__ = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP(nn.Module):
def __init__( self , UpperCamelCase__=10 , UpperCamelCase__=0.01 , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__="image" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
'''simple docstring'''
super().__init__()
A__ = None
A__ = device if device else get_device()
if vqgan:
A__ = vqgan
else:
A__ = load_vqgan(self.device , conf_path=UpperCamelCase__ , ckpt_path=UpperCamelCase__ )
self.vqgan.eval()
if clip:
A__ = clip
else:
A__ = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
A__ = ProcessorGradientFlow(device=self.device )
A__ = iterations
A__ = lr
A__ = log
A__ = make_grid
A__ = return_val
A__ = quantize
A__ = self.vqgan.decoder.z_shape
def lowercase_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=5 , UpperCamelCase__=True ):
'''simple docstring'''
A__ = []
if output_path is None:
A__ = "./animation.gif"
if input_path is None:
A__ = self.save_path
A__ = sorted(glob(input_path + "/*" ) )
if not len(UpperCamelCase__ ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(UpperCamelCase__ ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
A__ = total_duration / len(UpperCamelCase__ )
A__ = [frame_duration] * len(UpperCamelCase__ )
if extend_frames:
A__ = 1.5
A__ = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(UpperCamelCase__ ) )
imageio.mimsave(UpperCamelCase__ , UpperCamelCase__ , duration=UpperCamelCase__ )
print(f"""gif saved to {output_path}""" )
def lowercase_ ( self , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
A__ = preprocess(Image.open(UpperCamelCase__ ) , target_image_size=2_56 ).to(self.device )
A__ = preprocess_vqgan(UpperCamelCase__ )
A__ , *A__ = self.vqgan.encode(UpperCamelCase__ )
return z
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.latent.detach().requires_grad_()
A__ = base_latent + transform_vector
if self.quantize:
A__ , *A__ = self.vqgan.quantize(UpperCamelCase__ )
else:
A__ = trans_latent
return self.vqgan.decode(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
A__ = self.clip_preprocessor(text=UpperCamelCase__ , images=UpperCamelCase__ , return_tensors="pt" , padding=UpperCamelCase__ )
A__ = self.clip(**UpperCamelCase__ )
A__ = clip_outputs.logits_per_image
if weights is not None:
A__ = similarity_logits * weights
return similarity_logits.sum()
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = self._get_clip_similarity(pos_prompts["prompts"] , UpperCamelCase__ , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
A__ = self._get_clip_similarity(neg_prompts["prompts"] , UpperCamelCase__ , weights=neg_prompts["weights"] )
else:
A__ = torch.tensor([1] , device=self.device )
A__ = -torch.log(UpperCamelCase__ ) + torch.log(UpperCamelCase__ )
return loss
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = torch.randn_like(self.latent , requires_grad=UpperCamelCase__ , device=self.device )
A__ = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A__ = self._add_vector(UpperCamelCase__ )
A__ = loop_post_process(UpperCamelCase__ )
A__ = self._get_CLIP_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print("CLIP loss" , UpperCamelCase__ )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
wandb.init(reinit=UpperCamelCase__ , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
A__ = Image.open(UpperCamelCase__ )
A__ = image.resize((2_56, 2_56) )
wandb.log("Original Image" , wandb.Image(UpperCamelCase__ ) )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if not prompts:
return []
A__ = []
A__ = []
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(UpperCamelCase__ , (tuple, list) ):
A__ = prompt[0]
A__ = float(prompt[1] )
elif ":" in prompt:
A__ , A__ = prompt.split(":" )
A__ = float(UpperCamelCase__ )
else:
A__ = prompt
A__ = 1.0
processed_prompts.append(UpperCamelCase__ )
weights.append(UpperCamelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCamelCase__ , device=self.device ),
}
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=None , ):
'''simple docstring'''
if image_path:
A__ = self._get_latent(UpperCamelCase__ )
else:
A__ = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
A__ = self.process_prompts(UpperCamelCase__ )
A__ = self.process_prompts(UpperCamelCase__ )
if save_final and save_path is None:
A__ = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
else:
A__ = save_path + "_" + get_timestamp()
os.makedirs(UpperCamelCase__ )
A__ = save_path
A__ = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(UpperCamelCase__ ) )
A__ = loop_post_process(UpperCamelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ):
if show_intermediate:
show_pil(UpperCamelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"Image": wandb.Image(UpperCamelCase__ )} )
if show_final:
show_pil(UpperCamelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) ) | 261 |
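The prompt-processing method above accepts prompts either as a `'text:weight|text:weight'` string or as (text, weight) pairs. A standalone illustration of that convention (simplified; the method itself also builds a tensor of weights and handles tuple inputs):

prompts = "a smiling face:2.0|photo of a person|blurry:-1"
for raw in (p.strip() for p in prompts.split("|")):
    text, _, weight = raw.partition(":")
    print(repr(text), float(weight) if weight else 1.0)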
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24_858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs) | 261 | 1 |
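A quick instantiation check for the configuration above (randomly initialized, architecture only; `RoCBertModel` is the matching model class in transformers):

from transformers import RoCBertConfig, RoCBertModel

config = RoCBertConfig()  # defaults mirror weiweishi/roc-bert-base-zh
model = RoCBertModel(config)
print(config.pronunciation_vocab_size, config.shape_vocab_size)  # 910 24858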
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_6047
RO_CODE = 25_6145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
UpperCamelCase = NllbTokenizer
UpperCamelCase = NllbTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = {}
    def setUp(self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
UpperCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
UpperCAmelCase = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__lowerCamelCase )
UpperCAmelCase = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__lowerCamelCase )
UpperCAmelCase = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
UpperCAmelCase = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__lowerCamelCase )
UpperCAmelCase = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
UpperCAmelCase = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__lowerCamelCase )
UpperCAmelCase = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@require_torch
def _lowercase ( self : Dict ) -> List[str]:
"""simple docstring"""
if not self.test_seqaseq:
return
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
UpperCAmelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
UpperCAmelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
UpperCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase , tgt_texts=__lowerCamelCase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 1_0 )
# max_target_length will default to max_length if not specified
UpperCAmelCase = tokenizer.prepare_seqaseq_batch(
__lowerCamelCase , tgt_texts=__lowerCamelCase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCamelCase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , __lowerCamelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = [AddedToken("""<special>""" , lstrip=__lowerCamelCase )]
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase )
UpperCAmelCase = tokenizer_r.encode("""Hey this is a <special> token""" )
UpperCAmelCase = tokenizer_r.encode("""<special>""" , add_special_tokens=__lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
UpperCAmelCase = self.tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase )
UpperCAmelCase = tokenizer_p.encode("""Hey this is a <special> token""" )
UpperCAmelCase = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [
25_60_47,
1_62_97,
13_44_08,
81_65,
24_80_66,
1_47_34,
9_50,
11_35,
10_57_21,
35_73,
83,
2_73_52,
1_08,
4_94_86,
2,
]
@classmethod
    def setUpClass(cls):
"""simple docstring"""
UpperCAmelCase = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
UpperCAmelCase = 1
return cls
def _lowercase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 2_5_6_0_5_7 )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
UpperCAmelCase = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __lowerCamelCase )
UpperCAmelCase = 1_0
UpperCAmelCase = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
def _lowercase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_6_2_0_3, 3] )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
UpperCAmelCase = NllbTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase )
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # decoder starts with the ron_Latn code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # eng_Latn code, A, test, EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn code
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self):
        # In legacy mode the language code is appended after EOS; otherwise it is prefixed.
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
| 377 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
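# Import structure handed to _LazyModule below: the heavy modeling module is only
# imported the first time one of its attributes is accessed.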
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 377 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 552 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
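# Conversion flow: build a VideoMAE config from the model name, remap the original
# checkpoint's state-dict keys onto the 🤗 layout, then sanity-check the logits
# against known expected values before saving or pushing.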
def get_videomae_config(model_name):
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    # Encoder width/depth plus the smaller decoder used for masked pre-training.
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            # The original checkpoint stores a fused qkv projection; split it into query/key/value.
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 1_74] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 1_74] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 1_74] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 552 | 1 |
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    # 'a' -> 1, 'b' -> 2, ..., 'z' -> 26
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    # inverse of encode: 1 -> 'a', ..., 26 -> 'z'
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 539 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
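# The two helpers below assign each RGB pixel to its nearest entry in a fixed
# color-cluster palette, which is how this processor turns images into token ids.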
def squared_euclidean_distance(a, b):
    # Pairwise ||a_i - b_j||^2 via the expansion a^2 - 2ab + b^2.
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x, clusters):
    # Assign every RGB pixel to the index of its nearest color cluster.
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 680 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    # Compare x * log10(a) instead of a**x so the huge powers are never materialised.
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 680 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
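# Demo: build a small directed graph, print its adjacency lists, and run a DFS.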
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 617 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
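# Builds a full encoder/decoder input dict for the tests, deriving any attention or
# head mask that the caller did not supply.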
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        # The incremental (cached) decode must match the full forward pass.
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48,
            eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # shifting right consumes exactly one pad token overall
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        decode_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **generate_kwargs)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **decode_kwargs)
        assert generated_txt[0].strip() == tgt_text
| 468 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
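# The tester below produces tiny TimmBackbone configs and inputs that the common
# model/backbone/pipeline test mixins consume.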
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features,
            out_indices=self.out_indices, stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __snake_case ( self :Tuple ) ->Optional[int]:
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __snake_case ( self :Optional[Any] ) ->int:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __snake_case ( self :Optional[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->Tuple:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __snake_case ( self :List[Any] ) ->List[str]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Any:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :int ) ->int:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :str ) ->Union[str, Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Optional[int]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :List[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __snake_case ( self :Union[str, Any] ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __snake_case ( self :Tuple ) ->List[Any]:
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __snake_case ( self :List[Any] ) ->int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case ( self :Optional[Any] ) ->Optional[int]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 264 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase (__snake_case , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[Any] = LongformerTokenizer
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : List[str] = LongformerTokenizerFast
_SCREAMING_SNAKE_CASE : Dict = True
def __snake_case ( self :Dict ) ->Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase : Tuple = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowercase : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase : Dict = {"""unk_token""": """<unk>"""}
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def __snake_case ( self :List[Any] , **__magic_name__ :Any ) ->Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def __snake_case ( self :Optional[Any] , **__magic_name__ :Optional[Any] ) ->Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def __snake_case ( self :Tuple , __magic_name__ :Dict ) ->str:
lowercase : List[str] = """lower newer"""
lowercase : Any = """lower newer"""
return input_text, output_text
def __snake_case ( self :Tuple ) ->Union[str, Any]:
lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase : List[str] = """lower newer"""
lowercase : Tuple = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase : Optional[int] = tokenizer.tokenize(__magic_name__ ) # , add_prefix_space=True)
self.assertListEqual(__magic_name__ , __magic_name__ )
lowercase : str = tokens + [tokenizer.unk_token]
lowercase : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def __snake_case ( self :Any ) ->str:
lowercase : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__magic_name__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__magic_name__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def __snake_case ( self :Tuple ) ->Union[str, Any]:
lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
lowercase : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=__magic_name__ )
lowercase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__magic_name__ )
lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : List[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : int = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __snake_case ( self :Optional[Any] ) ->int:
lowercase : Optional[int] = self.get_tokenizer()
lowercase : Tuple = """Encode this sequence."""
lowercase : Dict = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowercase : List[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
lowercase : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__magic_name__ , __magic_name__ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowercase : Union[str, Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
lowercase : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
# Testing spaces after special tokens
lowercase : Any = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ )} ) # mask token has a left space
lowercase : Any = tokenizer.convert_tokens_to_ids(__magic_name__ )
lowercase : Any = """Encode <mask> sequence"""
lowercase : str = """Encode <mask>sequence"""
lowercase : Optional[int] = tokenizer.encode(__magic_name__ )
lowercase : List[str] = encoded.index(__magic_name__ )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__magic_name__ , __magic_name__ )
lowercase : Tuple = tokenizer.encode(__magic_name__ )
lowercase : List[str] = encoded.index(__magic_name__ )
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
def __snake_case ( self :Any ) ->int:
pass
def __snake_case ( self :List[Any] ) ->str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase : int = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
lowercase : List[str] = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
lowercase : Optional[int] = """A, <mask> AllenNLP sentence."""
lowercase : Any = tokenizer_r.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , return_token_type_ids=__magic_name__ )
lowercase : str = tokenizer_p.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , return_token_type_ids=__magic_name__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
__magic_name__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__magic_name__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __snake_case ( self :List[str] ) ->Tuple:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __magic_name__ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __magic_name__ )
self.assertEqual(post_processor_state["""trim_offsets"""] , __magic_name__ )
def __snake_case ( self :Dict ) ->List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase : Optional[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase : Optional[Any] = f"""{text_of_1_token} {text_of_1_token}"""
lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[str] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ), len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Optional[int] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ), len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Optional[int] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Union[str, Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : int = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Optional[int] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ), 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : str = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Any = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ), 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
| 264 | 1 |
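The offset-mapping assertions above can be reproduced outside the test harness. A minimal sketch, assuming network access and using `roberta-base` as a stand-in for the Longformer tokenizer (both are byte-level BPE tokenizers):

from transformers import AutoTokenizer

# Sketch only: loads a fast byte-level BPE tokenizer and inspects token offsets.
tok = AutoTokenizer.from_pretrained("roberta-base", add_prefix_space=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# With trim_offsets left at its default, the leading space is excluded from
# each token's span, e.g. [(0, 5), (6, 11)].
print(enc.offset_mapping)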
'''simple docstring'''
import requests
A: Tuple = "YOUR API KEY"
def _UpperCAmelCase ( a : str , a : str = giphy_api_key ) -> list:
"""simple docstring"""
lowercase_ : Dict = '+'.join(query.split() )
lowercase_ : str = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
lowercase_ : str = requests.get(a ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 704 |
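The search helper above builds its query string by hand with `'+'.join`. A minimal alternative sketch that lets `requests` handle the URL encoding (same endpoint as the snippet; the `limit` parameter is an assumption, not taken from the snippet):

import requests

def search_gifs(query: str, api_key: str, limit: int = 5) -> list:
    # requests encodes the params dict into the query string, so no manual
    # '+'.join(query.split()) step is needed.
    response = requests.get(
        "https://api.giphy.com/v1/gifs/search",
        params={"q": query, "api_key": api_key, "limit": limit},
        timeout=10,
    )
    response.raise_for_status()  # fail loudly on HTTP errors
    return [gif["url"] for gif in response.json()["data"]]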
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
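The `_LazyModule` indirection above defers importing submodules until a symbol is first accessed. A simplified sketch of the same idea using PEP 562's module-level `__getattr__`, meant to live in a package's `__init__.py` (not the real `_LazyModule` implementation):

# pkg/__init__.py -- simplified sketch of the lazy-import idea.
import importlib

_import_structure = {"config": ["OnnxConfig"], "convert": ["export"]}
# Invert the mapping so each public symbol points at its defining submodule.
_symbol_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}

def __getattr__(name):
    # Called only when `name` is not already an attribute of this module,
    # so the submodule import happens lazily, on first access.
    if name in _symbol_to_module:
        module = importlib.import_module("." + _symbol_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")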
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Dict = FlaxAutoencoderKL
@property
def A__ ( self ):
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0 )
UpperCAmelCase_ = jax.random.uniform(lowerCAmelCase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def A__ ( self ):
UpperCAmelCase_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 579 |
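The Flax test draws its dummy input from JAX's functional PRNG. A minimal sketch of that key-handling pattern:

import jax

key = jax.random.PRNGKey(0)           # deterministic root key
key, subkey = jax.random.split(key)   # split instead of reusing: keys are stateless
sample = jax.random.uniform(subkey, (4, 3, 32, 32))  # (batch, channels, height, width)
print(sample.shape, sample.dtype)     # (4, 3, 32, 32) float32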
import requests
SCREAMING_SNAKE_CASE = "" # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE = "https://api.openweathermap.org/data/2.5/"
def snake_case__ ( __SCREAMING_SNAKE_CASE = "Chicago" , __SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def snake_case__ ( __SCREAMING_SNAKE_CASE = "Kolkata, India" , __SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def snake_case__ ( __SCREAMING_SNAKE_CASE = 55.68 , __SCREAMING_SNAKE_CASE = 12.57 , __SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 579 | 1 |
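`params=locals()` above ships every local variable as a query parameter. A tighter sketch with an explicit parameter dict (same endpoint as the snippet):

import requests

def current_weather(location: str, appid: str) -> dict:
    # Only the parameters the API expects end up in the query string.
    url = "https://api.openweathermap.org/data/2.5/weather"
    return requests.get(url, params={"q": location, "appid": appid}, timeout=10).json()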
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
a : Union[str, Any] = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
a : str = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def lowercase_ ( ):
'''simple docstring'''
__lowercase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ) -> Optional[Any]:
'''simple docstring'''
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
__lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
__lowercase = json.load(snake_case_ )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding='''utf-8''' ) as merges_handle:
__lowercase = merges_handle.read().split('''\n''' )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def A ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def A ( self ) -> List[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A ( self , snake_case_ ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(snake_case_ )
__lowercase = get_pairs(snake_case_ )
if not pairs:
return token
while True:
__lowercase = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(snake_case_ ):
try:
__lowercase = word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(snake_case_ )
__lowercase = new_word
if len(snake_case_ ) == 1:
break
else:
__lowercase = get_pairs(snake_case_ )
__lowercase = ''' '''.join(snake_case_ )
__lowercase = word
return word
def A ( self , snake_case_ ) -> Dict:
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , snake_case_ ):
__lowercase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(''' ''' ) )
return bpe_tokens
def A ( self , snake_case_ ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def A ( self , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(snake_case_ )
def A ( self , snake_case_ ) -> Tuple:
'''simple docstring'''
__lowercase = ''''''.join(snake_case_ )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '''\n''' )
__lowercase = 0
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
__lowercase = token_index
writer.write(''' '''.join(snake_case_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A ( self , snake_case_ , snake_case_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def A ( self , snake_case_ , snake_case_ = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self , snake_case_ , snake_case_=False , **snake_case_ ) -> List[str]:
'''simple docstring'''
__lowercase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
__lowercase = ''' ''' + text
return (text, kwargs)
| 708 |
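The `bpe` method above is easier to follow in isolation. A standalone sketch of the same greedy merge loop; the merge ranks here are toy values, not a real vocabulary:

def get_pairs(symbols):
    # All adjacent symbol pairs, e.g. ("l", "o", "w") -> {("l", "o"), ("o", "w")}.
    return {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}

def bpe(word, ranks):
    # Repeatedly merge the adjacent pair with the lowest merge rank.
    symbols = tuple(word)
    while len(symbols) > 1:
        best = min(get_pairs(symbols), key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no known merge applies anymore
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return " ".join(symbols)

toy_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}
print(bpe("lower", toy_ranks))  # -> "low er"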
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowercase = TaConfig.from_json_file(_UpperCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
__lowercase = TaForConditionalGeneration(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 527 | 0 |
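A minimal sketch of the config -> model -> save round trip the script performs, with a tiny hypothetical configuration standing in for a real TF checkpoint:

from transformers import T5Config, T5ForConditionalGeneration

# All sizes below are made up to keep the model small; the real script reads
# them from the provided config JSON instead.
config = T5Config(vocab_size=1000, d_model=64, d_ff=128, num_layers=2, num_heads=4)
model = T5ForConditionalGeneration(config)
model.save_pretrained("./tiny-t5")
reloaded = T5ForConditionalGeneration.from_pretrained("./tiny-t5")
print(reloaded.config.d_model)  # 64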
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase : Union[str, Any] = 192
lowerCamelCase : Dict = 768
lowerCamelCase : Union[str, Any] = 12
lowerCamelCase : int = 3
lowerCamelCase : List[str] = [800, 1333]
lowerCamelCase : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase : List[Any] = 330
lowerCamelCase : Optional[int] = 14
lowerCamelCase : str = 6
lowerCamelCase : List[Any] = 1320
elif "yolos_s" in yolos_name:
lowerCamelCase : Any = 384
lowerCamelCase : str = 1536
lowerCamelCase : Optional[int] = 12
lowerCamelCase : Tuple = 6
elif "yolos_b" in yolos_name:
lowerCamelCase : Optional[Any] = [800, 1344]
lowerCamelCase : List[Any] = 91
lowerCamelCase : Optional[int] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = """coco-detection-id2label.json"""
lowerCamelCase : List[str] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type="""dataset""" ), """r""" ) )
lowerCamelCase : Optional[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : int = idalabel
lowerCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : str = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Any = in_proj_weight[: config.hidden_size, :]
lowerCamelCase : Optional[Any] = in_proj_bias[: config.hidden_size]
lowerCamelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : Optional[int] = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :]
def _a ( lowerCamelCase ):
if "backbone" in name:
lowerCamelCase : Union[str, Any] = name.replace("""backbone""", """vit""" )
if "cls_token" in name:
lowerCamelCase : Optional[int] = name.replace("""cls_token""", """embeddings.cls_token""" )
if "det_token" in name:
lowerCamelCase : Optional[Any] = name.replace("""det_token""", """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
lowerCamelCase : Dict = name.replace("""mid_pos_embed""", """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
lowerCamelCase : List[str] = name.replace("""pos_embed""", """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCamelCase : Optional[Any] = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """encoder.layer""" )
if "attn.proj" in name:
lowerCamelCase : Optional[Any] = name.replace("""attn.proj""", """attention.output.dense""" )
if "attn" in name:
lowerCamelCase : List[Any] = name.replace("""attn""", """attention.self""" )
if "norm1" in name:
lowerCamelCase : Tuple = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm2""", """layernorm_after""" )
if "mlp.fc1" in name:
lowerCamelCase : Union[str, Any] = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase : List[Any] = name.replace("""mlp.fc2""", """output.dense""" )
if "class_embed" in name:
lowerCamelCase : Dict = name.replace("""class_embed""", """class_labels_classifier""" )
if "bbox_embed" in name:
lowerCamelCase : Union[str, Any] = name.replace("""bbox_embed""", """bbox_predictor""" )
if "vit.norm" in name:
lowerCamelCase : int = name.replace("""vit.norm""", """vit.layernorm""" )
return name
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : int = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : int = int(key_split[2] )
lowerCamelCase : Dict = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase : Union[str, Any] = val[:dim, :]
lowerCamelCase : Optional[int] = val[
dim : dim * 2, :
]
lowerCamelCase : List[Any] = val[-dim:, :]
else:
lowerCamelCase : int = val[:dim]
lowerCamelCase : int = val[dim : dim * 2]
lowerCamelCase : str = val[-dim:]
else:
lowerCamelCase : int = val
return orig_state_dict
def _a ( ):
lowerCamelCase : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : int = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
lowerCamelCase : List[str] = get_yolos_config(lowerCamelCase )
# load original state_dict
lowerCamelCase : Optional[Any] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
# load 🤗 model
lowerCamelCase : Union[str, Any] = YolosForObjectDetection(lowerCamelCase )
model.eval()
lowerCamelCase : List[str] = convert_state_dict(lowerCamelCase, lowerCamelCase )
model.load_state_dict(lowerCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase : Optional[Any] = 800 if yolos_name != """yolos_ti""" else 512
lowerCamelCase : Optional[Any] = YolosImageProcessor(format="""coco_detection""", size=lowerCamelCase )
lowerCamelCase : List[str] = image_processor(images=prepare_img(), return_tensors="""pt""" )
lowerCamelCase : List[str] = model(**lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = outputs.logits, outputs.pred_boxes
lowerCamelCase , lowerCamelCase : Tuple = None, None
if yolos_name == "yolos_ti":
lowerCamelCase : Union[str, Any] = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase : Dict = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase : int = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase : Optional[Any] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase : Any = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase : Any = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase : Any = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase : List[Any] = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase : Optional[Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3], lowerCamelCase, atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3], lowerCamelCase, atol=1e-4 )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
lowerCamelCase : int = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
lowerCamelCase : str = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase, organization="""hustvl""" )
model.push_to_hub(lowerCamelCase, organization="""hustvl""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCamelCase =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 681 |
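The conversion above slices a fused qkv projection into separate query, key, and value tensors. A standalone sketch of that slicing with a made-up hidden size:

import torch

hidden_size = 8  # arbitrary toy value
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows
qkv_bias = torch.randn(3 * hidden_size)

# The three projections occupy consecutive row blocks of the fused matrix.
q_w = qkv_weight[:hidden_size, :]
k_w = qkv_weight[hidden_size : 2 * hidden_size, :]
v_w = qkv_weight[2 * hidden_size :, :]
q_b = qkv_bias[:hidden_size]
k_b = qkv_bias[hidden_size : 2 * hidden_size]
v_b = qkv_bias[2 * hidden_size :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)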
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = question_encoder
lowerCamelCase : Dict = generator
lowerCamelCase : Tuple = self.question_encoder
def UpperCamelCase__ ( self , __magic_name__ ):
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" )
lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ )
if config is None:
lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ):
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.question_encoder
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.generator
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __magic_name__ , )
if max_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : int = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : Dict = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = labels["""input_ids"""]
return model_inputs
| 681 | 1 |
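The RAG tokenizer is a thin composition of two tokenizers. A minimal sketch of the same save-to-subfolders pattern; `PairSaver` is a hypothetical stand-in, and both components are assumed to expose `save_pretrained`:

import os

class PairSaver:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator

    def save_pretrained(self, save_directory):
        # Each sub-component is persisted into its own named subdirectory,
        # mirroring the layout the RAG tokenizer reads back from.
        os.makedirs(save_directory, exist_ok=True)
        self.question_encoder.save_pretrained(
            os.path.join(save_directory, "question_encoder_tokenizer"))
        self.generator.save_pretrained(
            os.path.join(save_directory, "generator_tokenizer"))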
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCamelCase__ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
lowerCamelCase__ = get_tests_dir("""fixtures/vocab.json""")
lowerCamelCase__ = get_tests_dir("""fixtures""")
class A__ ( unittest.TestCase ):
lowercase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 0
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Optional[Any] = WavaVecaConfig()
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , 'vocab.json' ) )
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Dict = WavaVecaFeatureExtractor()
lowerCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
lowerCAmelCase__ : Dict = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , 'r' ) as f:
lowerCAmelCase__ : List[str] = json.load(a )
config_dict.pop('processor_class' )
with open(os.path.join(a , a ) , 'w' ) as f:
f.write(json.dumps(a ) )
lowerCAmelCase__ : int = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Optional[Any] = WavaVecaFeatureExtractor()
lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
lowerCAmelCase__ : List[str] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , 'r' ) as f:
lowerCAmelCase__ : Optional[Any] = json.load(a )
config_dict.pop('processor_class' )
with open(os.path.join(a , a ) , 'w' ) as f:
f.write(json.dumps(a ) )
lowerCAmelCase__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Any = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , 'vocab.json' ) )
# create empty sample processor
with open(os.path.join(a , a ) , 'w' ) as f:
f.write('{}' )
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(a ):
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
lowerCAmelCase__ : Any = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
lowerCAmelCase__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
lowerCAmelCase__ : Optional[int] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
lowerCAmelCase__ : Any = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a , use_fast=a )
lowerCAmelCase__ : List[str] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
try:
AutoConfig.register('custom' , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ : List[Any] = os.path.join(a , 'vocab.txt' )
with open(a , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase__ : Tuple = CustomTokenizer(a )
lowerCAmelCase__ : Dict = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
class A__ ( __magic_name__ ):
lowercase = False
class A__ ( __magic_name__ ):
lowercase = False
class A__ ( __magic_name__ ):
lowercase = 'AutoFeatureExtractor'
lowercase = 'AutoTokenizer'
lowercase = False
try:
AutoConfig.register('custom' , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class A__ ( unittest.TestCase ):
lowercase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _lowerCamelCase ( cls : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : str ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , 'test-processor' ) , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : int = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , 'test-processor-org' ) , push_to_hub=a , use_auth_token=self._token , organization='valid_org' , )
lowerCAmelCase__ : List[str] = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowerCAmelCase__ : Union[str, Any] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ : Optional[int] = os.path.join(a , 'vocab.txt' )
with open(a , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase__ : Any = CustomTokenizer(a )
lowerCAmelCase__ : Tuple = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
lowerCAmelCase__ : str = Repository(a , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , 'tokenizer_config.json' ) ) as f:
lowerCAmelCase__ : List[str] = json.load(a )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , 'custom_processing.py' ) ) )
repo.push_to_hub()
lowerCAmelCase__ : int = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
| 69 |
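A minimal sketch of the registration hook the test exercises, with a hypothetical `ToyConfig`:

from transformers import AutoConfig, PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"  # hypothetical model type, for illustration only

AutoConfig.register("toy", ToyConfig)   # wire the type name to the class
config = AutoConfig.for_model("toy")    # the auto-API now resolves it
print(type(config).__name__)            # ToyConfig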
from itertools import permutations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCAmelCase__ : str = [7, 11, 13, 17]
for i, test in enumerate(SCREAMING_SNAKE_CASE_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int:
return sum(
int(''.join(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
for num in permutations(range(SCREAMING_SNAKE_CASE_ ) )
if is_substring_divisible(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 69 | 1 |
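The divisibility predicate can be sanity-checked against a known pandigital from Project Euler 43. A standalone sketch with 0-indexed digits:

def is_substring_divisible(num: tuple) -> bool:
    # d2d3d4 % 2, d3d4d5 % 3, ..., d8d9d10 % 17 (digits are 0-indexed here).
    divisors = (2, 3, 5, 7, 11, 13, 17)
    return all(
        (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % d == 0
        for i, d in enumerate(divisors)
    )

print(is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)))  # True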
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a ) -> Union[str, Any]:
__A : Optional[Any] = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
__A : int = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$' , UpperCamelCase_ )
if matches:
__A : Optional[Any] = float(matches[1] )
__A : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__A : Optional[int] = 10_01
__A : List[str] = 'imagenet-1k-id2label.json'
__A : List[str] = 'huggingface/label-files'
__A : Tuple = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='dataset' ) , 'r' ) )
__A : Union[str, Any] = {int(UpperCamelCase_ ) + 1: v for k, v in idalabel.items()}
__A : Optional[int] = 'background'
__A : Dict = idalabel
__A : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__A : int = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a , a , a=False ) -> Tuple:
__A : Optional[int] = get_mobilenet_va_config(UpperCamelCase_ )
# Load 🤗 model
__A : List[Any] = MobileNetVaForImageClassification(UpperCamelCase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__A : Tuple = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
__A : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='pt' )
__A : Any = model(**UpperCamelCase_ )
__A : Any = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
__A : Dict = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
__A : Optional[int] = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
__A : int = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
print('Pushing to the hub...' )
__A : Any = 'google/' + model_name
image_processor.push_to_hub(UpperCamelCase_ )
model.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 239 |
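A standalone sketch of the model-name parsing above: the regex extracts the depth multiplier and input size from identifiers such as "mobilenet_v1_0.75_192".

import re

match = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
if match:
    depth_multiplier = float(match.group(1))  # 0.75
    image_size = int(match.group(2))          # 192
    print(depth_multiplier, image_size)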
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_lowercase = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
_lowercase = "sshleifer/student_marian_en_ro_6_1"
_lowercase = "sshleifer/tiny-mbart"
@require_torch
class _UpperCAmelCase ( A__ ):
def snake_case_ ( self , a__=False , a__=None , a__=True , a__=True , a__=True , a__=True , ):
A__ = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=a__ , num_train_epochs=1 , distributed=a__ , extra_args_str=a__ , predict_with_generate=a__ , do_train=a__ , do_eval=a__ , do_predict=a__ , )
A__ = TrainerState.load_from_json(os.path.join(a__ , '''trainer_state.json''')).log_history
if not do_eval:
return
A__ = [log for log in logs if '''eval_loss''' in log.keys()]
A__ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A__ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , a__)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def snake_case_ ( self):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def snake_case_ ( self):
self.run_seqaseq_quick(distributed=a__)
@require_torch_multi_gpu
def snake_case_ ( self):
self.run_seqaseq_quick(distributed=a__)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
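As a back-of-the-envelope check of the ~150MB figure that `test_run_seq2seq_bnb` relies on, the arithmetic from the comment above can be reproduced directly (a sketch; the 25M quantizable-parameter count is taken from that comment, and MB here means 2**20 bytes):

# Adam keeps two fp32 optimizer states per parameter (8 bytes); bnb's 8-bit optimizer keeps ~2 bytes.
quantized_params = 25_000_000
savings_mb = quantized_params * (8 - 2) / 2**20
print(round(savings_mb))  # ~143 MB, hence the conservative 120 MB threshold used in the test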
import argparse
import struct
import unittest
class SHA256:
    """
    Class to contain the entire pipeline for SHA-256 hashing.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Initialize round constants
        self.round_constants = [
            0x428A2F98,
            0x71374491,
            0xB5C0FBCF,
            0xE9B5DBA5,
            0x3956C25B,
            0x59F111F1,
            0x923F82A4,
            0xAB1C5ED5,
            0xD807AA98,
            0x12835B01,
            0x243185BE,
            0x550C7DC3,
            0x72BE5D74,
            0x80DEB1FE,
            0x9BDC06A7,
            0xC19BF174,
            0xE49B69C1,
            0xEFBE4786,
            0x0FC19DC6,
            0x240CA1CC,
            0x2DE92C6F,
            0x4A7484AA,
            0x5CB0A9DC,
            0x76F988DA,
            0x983E5152,
            0xA831C66D,
            0xB00327C8,
            0xBF597FC7,
            0xC6E00BF3,
            0xD5A79147,
            0x06CA6351,
            0x14292967,
            0x27B70A85,
            0x2E1B2138,
            0x4D2C6DFC,
            0x53380D13,
            0x650A7354,
            0x766A0ABB,
            0x81C2C92E,
            0x92722C85,
            0xA2BFE8A1,
            0xA81A664B,
            0xC24B8B70,
            0xC76C51A3,
            0xD192E819,
            0xD6990624,
            0xF40E3585,
            0x106AA070,
            0x19A4C116,
            0x1E376C08,
            0x2748774C,
            0x34B0BCB5,
            0x391C0CB3,
            0x4ED8AA4A,
            0x5B9CCA4F,
            0x682E6FF3,
            0x748F82EE,
            0x78A5636F,
            0x84C87814,
            0x8CC70208,
            0x90BEFFFA,
            0xA4506CEB,
            0xBEF9A3F7,
            0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
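    # Worked example of the padding rule above: for a 3-byte message,
    # 63 - (3 + 8) % 64 = 52, so preprocessing appends 0x80, then 52 zero
    # bytes, then the 8-byte big-endian bit length, giving
    # 3 + 1 + 52 + 8 = 64 bytes, i.e. exactly one 64-byte block.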
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """
        Right-rotate a given unsigned 32-bit number by a certain amount of rotations.
        """
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """
    Test class for the SHA256 class, checked against hashlib.
    """

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """
    Provides the option to hash either a string or the contents of a file
    and prints the calculated SHA-256 digest.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
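For a quick sanity check, the class above can be compared against the standard library (a minimal sketch, assuming this module is importable as written):

import hashlib

message = b"hello world"
assert SHA256(message).hash == hashlib.sha256(message).hexdigest()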
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Backward pass: bubble the smallest remaining element to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # Forward pass: bubble the largest remaining element to the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break

    return unsorted
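A short illustration of the early exit (a sketch; `cocktail_shaker_sort` as defined above): a nearly-sorted input is fixed by the first backward pass, so the next outer iteration finds no swaps and `break`s immediately.

print(cocktail_shaker_sort([1, 2, 3, 5, 4]))  # [1, 2, 3, 4, 5]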
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist in the repo.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)
    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
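A minimal sketch of the behaviour these tests exercise (using the same repo as above; "config.json" is the value of `CONFIG_NAME`):

from transformers.utils import cached_file

# Resolves to a path inside the local HF cache, downloading on first use:
local_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")

# With the private flag used above, a missing file yields None instead of raising:
missing = cached_file(
    "hf-internal-testing/tiny-random-bert", "conf", _raise_exceptions_for_missing_entries=False
)
assert missing is None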
"""simple docstring"""
from collections.abc import Sequence
def __a ( A = None ) -> int:
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
A__ = nums[0]
for i in range(1 , len(A ) ):
A__ = nums[i]
A__ = max(A , ans + num , A )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__UpperCAmelCase =int(input("""Enter number of elements : """).strip())
__UpperCAmelCase =list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array)) | 337 | 0 |
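Tracing the recurrence `ans = max(ans, ans + num, num)` on a small input shows how it either extends the running subsequence, restarts it, or keeps the best seen so far (illustrative):

# nums = [2, -1, 3]
# start:    ans = 2
# num = -1: ans = max(2, 2 + -1, -1) = 2   (skip the -1)
# num =  3: ans = max(2, 2 + 3, 3)   = 5   (take 2 and 3)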
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(7)
    5040
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
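Because of `@lru_cache`, repeated calls reuse earlier results (illustrative):

factorial(10)  # computes and caches factorial(0) .. factorial(10)
factorial(12)  # only two further multiplications, since factorial(10) is cached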
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
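A minimal usage sketch of the API re-exported by this `__init__` (illustrative; `model`, `optimizer`, `dataloader`, and `loss` are assumed to be defined elsewhere):

from accelerate import Accelerator

accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# ...inside the training loop, use accelerator.backward(loss) instead of loss.backward()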