"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowerCamelCase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowerCamelCase__ = json.load(f)
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int ) -> Any:
return FSMTTokenizer.from_pretrained(__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Dict = FSMTForConditionalGeneration.from_pretrained(__a ).to(__a )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any , __a : Tuple ) -> Union[str, Any]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
_UpperCamelCase : Optional[int] = F'''facebook/wmt19-{pair}'''
_UpperCamelCase : str = self.get_tokenizer(__a )
_UpperCamelCase : Union[str, Any] = self.get_model(__a )
_UpperCamelCase : Dict = bleu_data[pair]["src"]
_UpperCamelCase : Tuple = bleu_data[pair]["tgt"]
_UpperCamelCase : str = tokenizer(__a , return_tensors="pt" , truncation=__a , padding="longest" ).to(__a )
_UpperCamelCase : Tuple = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
_UpperCamelCase : Optional[Any] = tokenizer.batch_decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
_UpperCamelCase : List[str] = calculate_bleu(__a , __a )
print(__a )
self.assertGreaterEqual(scores["bleu"] , __a )
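
The `calculate_bleu` helper imported above lives in the neighboring examples `utils` module. A minimal sketch of what it computes, assuming the usual sacrebleu-based implementation, looks like this (the exact rounding and kwargs of the real helper may differ):

# Sketch of the imported helper, assuming it wraps sacrebleu's corpus_bleu.
from sacrebleu import corpus_bleu

def calculate_bleu_sketch(output_lns, refs_lns) -> dict:
    # sacrebleu expects a list of reference streams, hence the extra nesting
    return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}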
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : Any = _sin / (2 * q_factor)
_UpperCamelCase : str = (1 - _cos) / 2
_UpperCamelCase : Any = 1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : List[str] = -2 * _cos
_UpperCamelCase : Tuple = 1 - alpha
_UpperCamelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : List[str] = tau * frequency / samplerate
_UpperCamelCase : str = sin(lowercase_ )
_UpperCamelCase : Optional[Any] = cos(lowercase_ )
_UpperCamelCase : Dict = _sin / (2 * q_factor)
_UpperCamelCase : List[Any] = (1 + _cos) / 2
_UpperCamelCase : Optional[int] = -1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : str = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Tuple = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Dict = _sin / 2
_UpperCamelCase : int = 0
_UpperCamelCase : str = -ba
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : Optional[int] = -2 * _cos
_UpperCamelCase : Optional[Any] = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : str = tau * frequency / samplerate
_UpperCamelCase : Optional[Any] = sin(lowercase_ )
_UpperCamelCase : Optional[int] = cos(lowercase_ )
_UpperCamelCase : int = _sin / (2 * q_factor)
_UpperCamelCase : List[str] = 1 - alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : Union[str, Any] = 1 + alpha
_UpperCamelCase : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : int = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : List[Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Optional[int] = 10 ** (gain_db / 40)
_UpperCamelCase : str = 1 + alpha * big_a
_UpperCamelCase : Union[str, Any] = -2 * _cos
_UpperCamelCase : Optional[int] = 1 - alpha * big_a
_UpperCamelCase : int = 1 + alpha / big_a
_UpperCamelCase : Optional[Any] = -2 * _cos
_UpperCamelCase : Any = 1 - alpha / big_a
_UpperCamelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tau * frequency / samplerate
_UpperCamelCase : Any = sin(lowercase_ )
_UpperCamelCase : Union[str, Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40)
_UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : Any = big_a * (pmc + aaa)
_UpperCamelCase : Dict = 2 * big_a * mpc
_UpperCamelCase : str = big_a * (pmc - aaa)
_UpperCamelCase : Dict = ppmc + aaa
_UpperCamelCase : List[Any] = -2 * pmpc
_UpperCamelCase : Dict = ppmc - aaa
_UpperCamelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : Any = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : str = 10 ** (gain_db / 40)
_UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : List[Any] = big_a * (ppmc + aaa)
_UpperCamelCase : Dict = -2 * big_a * pmpc
_UpperCamelCase : Dict = big_a * (ppmc - aaa)
_UpperCamelCase : Optional[Any] = pmc + aaa
_UpperCamelCase : Any = 2 * mpc
_UpperCamelCase : Any = pmc - aaa
_UpperCamelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
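
Each factory above returns a configured second-order IIRFilter. A minimal usage sketch follows; the cutoff and sample rate are arbitrary example values, and `IIRFilter.process` is assumed to take one sample and return one filtered sample, matching the `audio_filters.iir_filter` module imported at the top of the file:

# Usage sketch: stream a few samples through a 1 kHz low-pass at 48 kHz.
filt = make_lowpass(1000, 48000)
filtered = [filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]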
"""simple docstring"""
import baseaa
def lowercase__ ( lowercase_ ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode("utf-8" ) )
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
return baseaa.baadecode(lowercase_ ).decode("utf-8" )
if __name__ == "__main__":
lowerCamelCase__ = "Hello World!"
lowerCamelCase__ = baseaa_encode(test)
print(encoded)
lowerCamelCase__ = baseaa_decode(encoded)
print(decoded)
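
Since `base64.b85decode` inverts `base64.b85encode`, the round trip can be checked directly:

# Round-trip check: decoding the encoding returns the original string.
assert base85_decode(base85_encode("Hello World!")) == "Hello World!"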
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
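
A typical invocation of the script, using the flags registered by its own argument parser; the script name and the file paths below are placeholders, not values prescribed by the source:

# Example invocation (paths are illustrative):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-converted \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned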
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCamelCase__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = "sequence-classification"
def __init__( self : Optional[int] , __a : List[Any] ) -> Optional[int]:
if type(__a ) == dict:
_UpperCamelCase : Optional[int] = Namespace(**__a )
_UpperCamelCase : int = glue_output_modes[hparams.task]
_UpperCamelCase : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(__a , __a , self.mode )
def __SCREAMING_SNAKE_CASE ( self : List[str] , **__a : Optional[int] ) -> int:
return self.model(**__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] , __a : str ) -> List[str]:
_UpperCamelCase : List[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCamelCase : Optional[int] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_UpperCamelCase : Union[str, Any] = self(**__a )
_UpperCamelCase : int = outputs[0]
_UpperCamelCase : List[Any] = self.trainer.lr_schedulers[0]["scheduler"]
_UpperCamelCase : Union[str, Any] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : str = self.hparams
_UpperCamelCase : Optional[Any] = processors[args.task]()
_UpperCamelCase : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCamelCase : List[str] = self._feature_file(__a )
if os.path.exists(__a ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , __a )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
_UpperCamelCase : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
_UpperCamelCase : Tuple = convert_examples_to_features(
__a , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , __a )
torch.save(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : int , __a : bool = False ) -> DataLoader:
_UpperCamelCase : Any = "dev" if mode == "test" else mode
_UpperCamelCase : Any = self._feature_file(__a )
logger.info("Loading features from cached file %s" , __a )
_UpperCamelCase : Any = torch.load(__a )
_UpperCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_UpperCamelCase : Union[str, Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_UpperCamelCase : Union[str, Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCamelCase : int = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCamelCase : str = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__a , __a , __a , __a ) , batch_size=__a , shuffle=__a , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Dict , __a : str ) -> Any:
_UpperCamelCase : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCamelCase : str = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_UpperCamelCase : Optional[Any] = self(**__a )
_UpperCamelCase, _UpperCamelCase : List[Any] = outputs[:2]
_UpperCamelCase : Optional[Any] = logits.detach().cpu().numpy()
_UpperCamelCase : Dict = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] ) -> tuple:
_UpperCamelCase : Tuple = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
_UpperCamelCase : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCamelCase : int = np.argmax(__a , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCamelCase : Union[str, Any] = np.squeeze(__a )
_UpperCamelCase : Any = np.concatenate([x["target"] for x in outputs] , axis=0 )
_UpperCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCamelCase : Any = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCamelCase : Optional[Any] = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , __a , __a )}
_UpperCamelCase : List[Any] = dict(results.items() )
_UpperCamelCase : Optional[Any] = results
return ret, preds_list, out_label_list
def __SCREAMING_SNAKE_CASE ( self : str , __a : list ) -> dict:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = self._eval_end(__a )
_UpperCamelCase : Optional[Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Tuple ) -> dict:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = self._eval_end(__a )
_UpperCamelCase : List[Any] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : List[Any] , __a : Tuple ) -> int:
BaseTransformer.add_model_specific_args(__a , __a )
parser.add_argument(
"--max_seq_length" , default=128 , type=__a , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=__a , required=__a , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=__a , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Tuple = argparse.ArgumentParser()
add_generic_args(lowercase_ ,os.getcwd() )
_UpperCamelCase : int = GLUETransformer.add_model_specific_args(lowercase_ ,os.getcwd() )
_UpperCamelCase : Dict = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_UpperCamelCase : Optional[Any] = os.path.join(
"./results" ,F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' ,)
os.makedirs(args.output_dir )
_UpperCamelCase : Any = GLUETransformer(lowercase_ )
_UpperCamelCase : Dict = generic_train(lowercase_ ,lowercase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_UpperCamelCase : List[str] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=lowercase_ ) )
_UpperCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowercase_ )
if __name__ == "__main__":
main()
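
A typical command line for this trainer, assuming the generic flags (`--model_name_or_path`, `--data_dir`, `--output_dir`, `--do_train`) are the ones registered by `add_generic_args` in the companion `lightning_base` module; the argument values are placeholders:

# Example invocation (values are illustrative):
#   python run_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --output_dir ./results/mrpc \
#       --gpus 1 \
#       --do_train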
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : int = prime_factors(lowercase_ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
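
A few spot checks of small values, assuming `prime_factors` returns factors with multiplicity (e.g. `prime_factors(4) == [2, 2]`) and `is_square_free` tests whether the list has repeats:

assert mobius(1) == 1   # empty factorization, even count
assert mobius(2) == -1  # one prime factor
assert mobius(4) == 0   # 4 = 2 * 2 is not square-free
assert mobius(6) == 1   # 6 = 2 * 3, two distinct primes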
"""simple docstring"""
from numpy import exp, pi, sqrt
def lowercase__ ( lowercase_ ,lowercase_ = 0.0 ,lowercase_ = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
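
At the mean, the exponential term is 1 and the density reduces to 1/sqrt(2 * pi * sigma**2). A quick numeric check for the standard normal:

# The standard normal density at x = 0 is 1/sqrt(2*pi) ~= 0.3989.
from math import isclose, pi, sqrt

assert isclose(gaussian(0), 1 / sqrt(2 * pi))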
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
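
The `\u0120` character that appears throughout the test vocabulary above is the byte-level BPE rendering of a leading space: GPT-2 maps each raw byte to a printable character, and the space byte 0x20 is shifted up by 256 to U+0120, printed as "Ġ". A quick check:

# "\u0120" is how byte-level BPE makes a leading space visible.
assert "\u0120" == chr(0x20 + 0x100) == "Ġ"
assert "\u0120low" == "Ġlow"  # i.e. the token " low" with its leading space marked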
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Any = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
_UpperCamelCase : Dict = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowercase_ )
assert base_extractor.is_extractable(lowercase_ )
_UpperCamelCase : int = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(lowercase_ ,lowercase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_UpperCamelCase : Any = file_path.read_text(encoding="utf-8" )
else:
_UpperCamelCase : Union[str, Any] = output_path.read_text(encoding="utf-8" )
_UpperCamelCase : List[str] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Tuple = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
_UpperCamelCase : List[str] = input_paths[compression_format]
if input_path is None:
_UpperCamelCase : List[Any] = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowercase_ )
_UpperCamelCase : int = Extractor.infer_extractor_format(lowercase_ )
assert extractor_format is not None
_UpperCamelCase : Optional[Any] = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(lowercase_ ,lowercase_ ,lowercase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_UpperCamelCase : List[Any] = file_path.read_text(encoding="utf-8" )
else:
_UpperCamelCase : List[str] = output_path.read_text(encoding="utf-8" )
_UpperCamelCase : Union[str, Any] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
import tarfile
_UpperCamelCase : List[str] = tmp_path / "data_dot_dot"
directory.mkdir()
_UpperCamelCase : Optional[int] = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(lowercase_ ,"w" ) as f:
f.add(lowercase_ ,arcname=os.path.join(".." ,text_file.name ) )
return path
@pytest.fixture
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
import tarfile
_UpperCamelCase : str = tmp_path / "data_sym_link"
directory.mkdir()
_UpperCamelCase : Union[str, Any] = directory / "tar_file_with_sym_link.tar"
os.symlink(".." ,directory / "subdir" ,target_is_directory=lowercase_ )
with tarfile.TarFile(lowercase_ ,"w" ) as f:
f.add(str(directory / "subdir" ) ,arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" ,[("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] ,)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
_UpperCamelCase : List[Any] = insecure_tar_files[insecure_tar_file]
_UpperCamelCase : Optional[Any] = tmp_path / "extracted"
TarExtractor.extract(lowercase_ ,lowercase_ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : int = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
_UpperCamelCase : Optional[int] = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(lowercase_ )
assert zipfile.is_zipfile(str(lowercase_ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowercase_ ) # but we're right
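
The last test exploits a known quirk: `zipfile.is_zipfile` scans for the end-of-central-directory signature anywhere in the file, so unrelated bytes (here, a PNG that happens to embed "PK\x05\x06") are misclassified. Checking only the leading magic bytes avoids that false positive; the sketch below illustrates the idea and is not the actual `datasets` implementation, which also handles special cases like empty archives:

# Illustrative sketch: a stricter check that only trusts the file's first bytes.
def starts_with_zip_magic(path) -> bool:
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"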
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
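
A few spot checks make the tie-handling behavior concrete: all values sharing the maximum count are returned, sorted.

assert mode([1, 2, 2, 3]) == [2]
assert mode([1, 1, 2, 2]) == [1, 2]
assert mode([]) == []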
"""simple docstring"""
lowerCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = [False] * len(lowercase_ )
_UpperCamelCase : Dict = [s]
_UpperCamelCase : List[str] = True
while queue:
_UpperCamelCase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase_ )
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[str] = u
return visited[t]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = [-1] * (len(lowercase_ ))
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ):
_UpperCamelCase : int = float("Inf" )
_UpperCamelCase : Optional[Any] = sink
while s != source:
# Find the minimum value in select path
_UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] )
_UpperCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
_UpperCamelCase : Union[str, Any] = sink
while v != source:
_UpperCamelCase : Optional[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase : Dict = parent[v]
for i in range(len(lowercase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
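
`mincut` runs the Edmonds-Karp variant of Ford-Fulkerson: BFS finds an augmenting path, the bottleneck capacity is pushed along it, and residual capacities are updated until no path remains. It then reports every edge that became saturated (residual capacity zero, original capacity positive); for this example graph those edges happen to form a minimum cut whose total original capacity equals the max flow of 23. Note the function mutates its `graph` argument, so pass a copy if the capacities must be preserved:

import copy

cut_edges = mincut(copy.deepcopy(test_graph), source=0, sink=5)  # [(1, 3), (4, 3), (4, 5)]
cut_capacity = sum(test_graph[u][v] for u, v in cut_edges)       # 12 + 7 + 4 = 23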
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=True ,lowercase_="pt" ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = {"add_prefix_space": True} if isinstance(lowercase_ ,lowercase_ ) and not line.startswith(" " ) else {}
_UpperCamelCase : int = padding_side
return tokenizer(
[line] ,max_length=lowercase_ ,padding="max_length" if pad_to_max_length else None ,truncation=lowercase_ ,return_tensors=lowercase_ ,add_special_tokens=lowercase_ ,**lowercase_ ,)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = input_ids.ne(lowercase_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : int , __a : int , __a : Optional[Any] , __a : Tuple="train" , __a : Dict=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[Any]="" , ) -> Optional[Any]:
super().__init__()
_UpperCamelCase : Optional[Any] = Path(__a ).joinpath(type_path + ".source" )
_UpperCamelCase : List[Any] = Path(__a ).joinpath(type_path + ".target" )
_UpperCamelCase : Optional[Any] = self.get_char_lens(self.src_file )
_UpperCamelCase : Dict = max_source_length
_UpperCamelCase : List[str] = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
_UpperCamelCase : Any = tokenizer
_UpperCamelCase : str = prefix
if n_obs is not None:
_UpperCamelCase : Dict = self.src_lens[:n_obs]
_UpperCamelCase : Optional[int] = src_lang
_UpperCamelCase : Optional[Any] = tgt_lang
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.src_lens )
def __getitem__( self : Tuple , __a : Union[str, Any] ) -> Dict[str, torch.Tensor]:
_UpperCamelCase : Tuple = index + 1 # linecache starts at 1
_UpperCamelCase : Tuple = self.prefix + linecache.getline(str(self.src_file ) , __a ).rstrip("\n" )
_UpperCamelCase : List[str] = linecache.getline(str(self.tgt_file ) , __a ).rstrip("\n" )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCamelCase : Tuple = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __a ) else self.tokenizer
)
_UpperCamelCase : Any = self.tokenizer.generator if isinstance(self.tokenizer , __a ) else self.tokenizer
_UpperCamelCase : str = encode_line(__a , __a , self.max_source_length , "right" )
_UpperCamelCase : List[Any] = encode_line(__a , __a , self.max_target_length , "right" )
_UpperCamelCase : List[Any] = source_inputs["input_ids"].squeeze()
_UpperCamelCase : Optional[int] = target_inputs["input_ids"].squeeze()
_UpperCamelCase : List[str] = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : List[str] ) -> Optional[Any]:
return [len(__a ) for x in Path(__a ).open().readlines()]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Dict ) -> Dict[str, torch.Tensor]:
_UpperCamelCase : Optional[int] = torch.stack([x["input_ids"] for x in batch] )
_UpperCamelCase : List[str] = torch.stack([x["attention_mask"] for x in batch] )
_UpperCamelCase : Any = torch.stack([x["decoder_input_ids"] for x in batch] )
_UpperCamelCase : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __a )
else self.tokenizer.pad_token_id
)
_UpperCamelCase : int = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __a )
else self.tokenizer.pad_token_id
)
_UpperCamelCase : Dict = trim_batch(__a , __a )
_UpperCamelCase, _UpperCamelCase : List[Any] = trim_batch(__a , __a , attention_mask=__a )
_UpperCamelCase : int = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
lowerCamelCase__ = getLogger(__name__)
def lowercase__ ( lowercase_ ) -> Dict:
"""simple docstring"""
return list(itertools.chain.from_iterable(lowercase_ ) )
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : Tuple = get_git_info()
save_json(lowercase_ ,os.path.join(lowercase_ ,"git_log.json" ) )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=4 ,**lowercase_ ) -> List[Any]:
"""simple docstring"""
with open(lowercase_ ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ ,indent=lowercase_ ,**lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
with open(lowercase_ ) as f:
return json.load(lowercase_ )
def lowercase__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase : int = git.Repo(search_parent_directories=lowercase_ )
_UpperCamelCase : str = {
"repo_id": str(lowercase_ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def lowercase__ ( lowercase_ ,lowercase_ ) -> List:
"""simple docstring"""
return list(map(lowercase_ ,lowercase_ ) )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ,"wb" ) as f:
return pickle.dump(lowercase_ ,lowercase_ )
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
def remove_articles(lowercase_ ):
return re.sub(r"\b(a|an|the)\b" ," " ,lowercase_ )
def white_space_fix(lowercase_ ):
return " ".join(text.split() )
def remove_punc(lowercase_ ):
_UpperCamelCase : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase_ ) ) ) )
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : int = normalize_answer(lowercase_ ).split()
_UpperCamelCase : List[str] = normalize_answer(lowercase_ ).split()
_UpperCamelCase : Dict = Counter(lowercase_ ) & Counter(lowercase_ )
_UpperCamelCase : List[Any] = sum(common.values() )
if num_same == 0:
return 0
_UpperCamelCase : Optional[int] = 1.0 * num_same / len(lowercase_ )
_UpperCamelCase : Dict = 1.0 * num_same / len(lowercase_ )
_UpperCamelCase : int = (2 * precision * recall) / (precision + recall)
return fa
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
return normalize_answer(lowercase_ ) == normalize_answer(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
assert len(lowercase_ ) == len(lowercase_ )
_UpperCamelCase : Optional[int] = 0
for hypo, pred in zip(lowercase_ ,lowercase_ ):
em += exact_match_score(lowercase_ ,lowercase_ )
if len(lowercase_ ) > 0:
em /= len(lowercase_ )
return {"em": em}
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
return model_prefix.startswith("rag" )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Dict = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCamelCase : Dict = "dropout_rate"
for p in extra_params:
if getattr(lowercase_ ,lowercase_ ,lowercase_ ):
if not hasattr(lowercase_ ,lowercase_ ) and not hasattr(lowercase_ ,equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(lowercase_ ) )
delattr(lowercase_ ,lowercase_ )
continue
_UpperCamelCase : int = p if hasattr(lowercase_ ,lowercase_ ) else equivalent_param[p]
setattr(lowercase_ ,lowercase_ ,getattr(lowercase_ ,lowercase_ ) )
delattr(lowercase_ ,lowercase_ )
return hparams, config
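# Hedged note: the helper above moves selected extra hparams onto the model config,
# remapping `dropout` to T5's `dropout_rate` where needed, and drops (with a log
# line) any param the config does not define, returning the cleaned pair.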
| 310
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(lowercase_ ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase_ ,(list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase_ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
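# Hedged usage note for the helper above (referenced below as `make_batched`):
# a single frame becomes one single-frame video, a flat list of frames becomes one
# video, and an already-batched list of videos passes through unchanged, e.g. with
# frame = np.zeros((224, 224, 3), dtype=np.uint8):
#   make_batched(frame)           # -> [[frame]]
#   make_batched([frame, frame])  # -> [[frame, frame]]
#   make_batched([[frame]])       # -> [[frame]]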
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : List[str] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 256}
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : int = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase : Optional[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : str = do_resize
_UpperCamelCase : Dict = size
_UpperCamelCase : int = do_center_crop
_UpperCamelCase : int = crop_size
_UpperCamelCase : Optional[Any] = resample
_UpperCamelCase : Dict = do_rescale
_UpperCamelCase : Any = rescale_factor
_UpperCamelCase : Any = offset
_UpperCamelCase : Union[str, Any] = do_normalize
_UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
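# Hedged summary: per frame, preprocessing above applies resize -> center-crop ->
# rescale (with an optional offset subtracted before scaling) -> normalize, then
# converts to the requested channel format and groups frames per video under the
# "pixel_values" key of the returned BatchFeature.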
| 310
| 1
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "xlnet"
SCREAMING_SNAKE_CASE__ :List[Any] = ["mems"]
SCREAMING_SNAKE_CASE__ :Any = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , __a : Union[str, Any]=3_2000 , __a : Optional[int]=1024 , __a : List[str]=24 , __a : Optional[int]=16 , __a : Tuple=4096 , __a : Optional[Any]="gelu" , __a : Tuple=True , __a : str="bi" , __a : str=0.02 , __a : Dict=1e-1_2 , __a : Optional[int]=0.1 , __a : List[Any]=512 , __a : Any=None , __a : List[str]=True , __a : str=False , __a : int=False , __a : Any=-1 , __a : int=False , __a : Dict="last" , __a : Tuple=True , __a : Any="tanh" , __a : Optional[int]=0.1 , __a : str=5 , __a : Tuple=5 , __a : Dict=5 , __a : Optional[Any]=1 , __a : List[str]=2 , **__a : str , ) -> Dict:
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Optional[int] = d_model
_UpperCamelCase : int = n_layer
_UpperCamelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
_UpperCamelCase : str = d_model // n_head
_UpperCamelCase : List[Any] = ff_activation
_UpperCamelCase : str = d_inner
_UpperCamelCase : Any = untie_r
_UpperCamelCase : List[Any] = attn_type
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Dict = layer_norm_eps
_UpperCamelCase : str = dropout
_UpperCamelCase : int = mem_len
_UpperCamelCase : Union[str, Any] = reuse_len
_UpperCamelCase : int = bi_data
_UpperCamelCase : List[str] = clamp_len
_UpperCamelCase : List[Any] = same_length
_UpperCamelCase : Union[str, Any] = summary_type
_UpperCamelCase : str = summary_use_proj
_UpperCamelCase : Any = summary_activation
_UpperCamelCase : Dict = summary_last_dropout
_UpperCamelCase : Optional[Any] = start_n_top
_UpperCamelCase : Tuple = end_n_top
_UpperCamelCase : Any = bos_token_id
_UpperCamelCase : Union[str, Any] = pad_token_id
_UpperCamelCase : int = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , __a , )
_UpperCamelCase : List[str] = kwargs["use_cache"]
_UpperCamelCase : str = use_mems_eval
_UpperCamelCase : str = use_mems_train
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[str] ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
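# Hedged note: the property/setter pair above implements `max_position_embeddings`
# (the setter decorator names it): XLNet has no hard sequence-length limit, so the
# getter returns -1 and the setter refuses any assignment.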
| 310
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
_UpperCamelCase : Any = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
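# Hedged note: each line of the objects/attributes files is expected to be a
# comma-separated list of synonyms; only the first synonym per line is kept,
# lowercased and stripped, yielding the Visual Genome class and attribute lists.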
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = OrderedDict()
with open(lowercase_ ,"rb" ) as f:
_UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCamelCase : List[str] = ckp.pop(lowercase_ )
if isinstance(lowercase_ ,np.ndarray ):
_UpperCamelCase : List[Any] = torch.tensor(lowercase_ )
else:
assert isinstance(lowercase_ ,torch.Tensor ), type(lowercase_ )
_UpperCamelCase : Optional[Any] = v
return r
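# Hedged note: the loader above reads a Detectron-style pickle ({"model": {...}})
# and converts every numpy array in it to a torch.Tensor, asserting that anything
# else is already a tensor.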
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = {}
def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any:
_UpperCamelCase : Optional[Any] = name
_UpperCamelCase : Optional[Any] = level
_UpperCamelCase : Union[str, Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCamelCase : Optional[int] = copy.deepcopy(__a )
_UpperCamelCase : Dict = copy.deepcopy(__a )
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 )
_UpperCamelCase : Optional[Any] = v
setattr(self , __a , __a )
_UpperCamelCase : Optional[Any] = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int:
_UpperCamelCase : Any = val
_UpperCamelCase : Optional[Any] = val
_UpperCamelCase : Dict = key.split("." )
_UpperCamelCase : int = len(__a ) - 1
_UpperCamelCase : List[str] = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
_UpperCamelCase : str = val
else:
_UpperCamelCase : List[str] = pointer[l]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._pointer
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict:
with open(F'''{file_name}''' , "w" ) as stream:
dump(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]:
with open(F'''{file_name}''' , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]:
with open(__a ) as stream:
_UpperCamelCase : int = load(__a , Loader=__a )
return data
def __str__( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = " "
if self._name != "root":
_UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
_UpperCamelCase : Any = ""
_UpperCamelCase : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n'''
_UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a )
_UpperCamelCase : str = kwargs.pop("resume_download" , __a )
_UpperCamelCase : Any = kwargs.pop("proxies" , __a )
_UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
_UpperCamelCase : Optional[Any] = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
_UpperCamelCase : Optional[int] = pretrained_model_name_or_path
else:
_UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
_UpperCamelCase : Optional[int] = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCamelCase : List[Any] = Config.load_yaml(__a )
except EnvironmentError:
_UpperCamelCase : Union[str, Any] = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device )
_UpperCamelCase : str = in_tensor.numpy()
_UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), (
F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = urlparse(lowercase_ )
return parsed.scheme in ("http", "https")
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str:
"""simple docstring"""
_UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_UpperCamelCase : List[str] = "/" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
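# Hedged usage sketch (model ids are illustrative): a namespaced id keeps the new
# "/" layout while a bare id falls back to the legacy "-" format, e.g.
#   hf_bucket_url("facebook/bart-large", "pytorch_model.bin")
#     -> "https://cdn.huggingface.co/facebook/bart-large/pytorch_model.bin"
#   hf_bucket_url("bert-base-uncased", "pytorch_model.bin")
#     -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"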
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase_ ,lowercase_ ):
ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() )
elif isinstance(lowercase_ ,lowercase_ ):
ua += "; " + user_agent
_UpperCamelCase : Any = {"user-agent": ua}
if resume_size > 0:
_UpperCamelCase : str = "bytes=%d-" % (resume_size,)
_UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ )
if response.status_code == 416: # Range not satisfiable
return
_UpperCamelCase : List[str] = response.headers.get("Content-Length" )
_UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None
_UpperCamelCase : Optional[int] = tqdm(
unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase_ ) )
temp_file.write(lowercase_ )
progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
# etag is None: we don't have a connection, the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" % (lowercase_ ,temp_file.name) )
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
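# Hedged summary of the caching protocol above: the cache key hashes the URL (plus
# the etag when the server is reachable); a ".lock" sidecar serializes concurrent
# downloads, a ".incomplete" file supports resumable transfers, and a ".json"
# sidecar records the url/etag of the finished blob.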
def lowercase__ ( lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = url.encode("utf-8" )
_UpperCamelCase : List[str] = shaaaa(lowercase_ )
_UpperCamelCase : List[str] = url_hash.hexdigest()
if etag:
_UpperCamelCase : Optional[Any] = etag.encode("utf-8" )
_UpperCamelCase : Optional[Any] = shaaaa(lowercase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
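# Hedged sketch of the filename scheme above (assuming `shaaaa` is the obfuscated
# sha256): name = sha256(url) [+ "." + sha256(etag)] [+ ".h5" for TF weights], e.g.
#   from hashlib import sha256
#   sha256(b"https://example.com/model.bin").hexdigest()  # -> base cache filename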
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
_UpperCamelCase : Optional[int] = req.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = requests.get(lowercase_ )
_UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
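# Hedged usage note (URL is illustrative): the helper above fetches an image over
# HTTP and decodes it via PIL into a numpy array, e.g.
#   img = get_image_from_url("https://example.com/cat.jpg")  # -> np.ndarray (H, W, C)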
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
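# Hedged note: besides converting numpy weights to tensors, the loader above adds a
# zero-valued "num_batches_tracked" buffer alongside every BatchNorm "running_var"
# entry, which newer torch versions expect in state dicts.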
def lowercase__ ( ) -> Dict:
"""simple docstring"""
print(F'''{os.path.abspath(os.path.join(lowercase_ ,os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
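# Hedged usage sketch (the public name `chunk` is an assumption; the def above is
# obfuscated): the generator yields fixed-size slices of a sequence, e.g.
#   list(chunk([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]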
| 310
| 1
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE__ :List[Any] = "OwlViTImageProcessor"
SCREAMING_SNAKE_CASE__ :Optional[Any] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Optional[Any] , __a : str=None , __a : Any=None , **__a : List[str] ) -> int:
_UpperCamelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_UpperCamelCase : int = kwargs.pop("feature_extractor" )
_UpperCamelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : List[str] , __a : Any=None , __a : int=None , __a : Any=None , __a : Dict="max_length" , __a : str="np" , **__a : Dict ) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
_UpperCamelCase : Optional[int] = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]
elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
_UpperCamelCase : int = []
# Maximum number of queries across batch
_UpperCamelCase : Optional[int] = max([len(__a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__a ) != max_num_queries:
_UpperCamelCase : Optional[Any] = t + [" "] * (max_num_queries - len(__a ))
_UpperCamelCase : Dict = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
encodings.append(__a )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_UpperCamelCase : Optional[int] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCamelCase : Any = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCamelCase : List[str] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCamelCase : Dict = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCamelCase : Optional[int] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_UpperCamelCase : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCamelCase : Tuple = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCamelCase : Optional[int] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_UpperCamelCase : List[Any] = BatchEncoding()
_UpperCamelCase : Optional[Any] = input_ids
_UpperCamelCase : Union[str, Any] = attention_mask
if query_images is not None:
_UpperCamelCase : List[Any] = BatchEncoding()
_UpperCamelCase : str = self.image_processor(
__a , return_tensors=__a , **__a ).pixel_values
_UpperCamelCase : Tuple = query_pixel_values
if images is not None:
_UpperCamelCase : Optional[int] = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCamelCase : List[Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] , *__a : Optional[int] , **__a : int ) -> Tuple:
return self.image_processor.post_process(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , *__a : int , **__a : List[Any] ) -> Optional[Any]:
return self.image_processor.post_process_object_detection(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple , *__a : int , **__a : Any ) -> Any:
return self.image_processor.post_process_image_guided_detection(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , *__a : Optional[Any] , **__a : List[str] ) -> Tuple:
return self.tokenizer.batch_decode(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , *__a : int , **__a : List[str] ) -> Optional[Any]:
return self.tokenizer.decode(*__a , **__a )
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 310
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super().__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
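# Hedged note on the forward pass above: for each query i, q[i] @ s_start.T scores
# every query token against the support tokens tagged as entity starts; summing
# over the support tokens and softmax-ing over the query length yields per-token
# span-start probabilities, and likewise for span ends via the end-token masks.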
| 310
| 1
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCamelCase__ = logging.getLogger(__name__)
def lowercase__ ( lowercase_=2 ,lowercase_=3 ,lowercase_=16 ,lowercase_ = 10 ,lowercase_ = 2 ) -> Dict:
"""simple docstring"""
def get_dataset(lowercase_ ):
_UpperCamelCase : Optional[int] = torch.randn(batch_size * n_batches ,1 )
return TensorDataset(lowercase_ ,a * x + b + 0.1 * torch.randn(batch_size * n_batches ,1 ) )
_UpperCamelCase : Dict = get_dataset(lowercase_ )
_UpperCamelCase : List[str] = get_dataset(lowercase_ )
_UpperCamelCase : Union[str, Any] = DataLoader(lowercase_ ,shuffle=lowercase_ ,batch_size=lowercase_ ,num_workers=4 )
_UpperCamelCase : str = DataLoader(lowercase_ ,shuffle=lowercase_ ,batch_size=lowercase_ ,num_workers=4 )
return (train_dataloader, valid_dataloader)
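# Hedged note: the factory above synthesizes a noisy linear-regression task,
# y = a*x + b + 0.1*eps, so the DummyModel below (forward: x * self.a + self.b)
# can recover a and b, e.g.
#   train_dl, valid_dl = dummy_dataloaders()  # defaults: a=2, b=3, batch_size=16
#   x, y = next(iter(train_dl))               # x.shape == y.shape == (16, 1)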
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : Tuple = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = batch
_UpperCamelCase : Tuple = model(lowercase_ )
_UpperCamelCase : Any = torch.nn.functional.mse_loss(lowercase_ ,lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
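# Hedged note: the random draw recorded after each epoch above is what the
# checkpoint tests below compare, verifying that save/load also round-trips the
# RNG state so resumed training stays deterministic.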
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ) -> Any:
super().__init__()
_UpperCamelCase : Any = nn.Parameter(torch.randn(1 ) )
_UpperCamelCase : Optional[Any] = nn.Parameter(torch.randn(1 ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : str ) -> int:
return x * self.a + self.b
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_UpperCamelCase : List[str] = DummyModel()
_UpperCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_UpperCamelCase, _UpperCamelCase : str = dummy_dataloaders()
_UpperCamelCase : str = ProjectConfiguration(total_limit=1 , project_dir=__a , automatic_checkpoint_naming=__a )
# Train baseline
_UpperCamelCase : Tuple = Accelerator(project_config=__a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Any = accelerator.prepare(
__a , __a , __a , __a )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_UpperCamelCase : Any = DummyModel()
_UpperCamelCase : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_UpperCamelCase, _UpperCamelCase : List[str] = dummy_dataloaders()
# Train baseline
_UpperCamelCase : Optional[int] = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Any = accelerator.prepare(
__a , __a , __a , __a )
# Save initial
_UpperCamelCase : List[str] = os.path.join(__a , "initial" )
accelerator.save_state(__a )
((_UpperCamelCase), (_UpperCamelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCamelCase : List[str] = optimizer.state_dict()
_UpperCamelCase : Tuple = train(3 , __a , __a , __a , __a )
((_UpperCamelCase), (_UpperCamelCase)) : Any = model.a.item(), model.b.item()
_UpperCamelCase : Dict = optimizer.state_dict()
# Train partially
set_seed(42 )
_UpperCamelCase : Optional[int] = DummyModel()
_UpperCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_UpperCamelCase, _UpperCamelCase : str = dummy_dataloaders()
_UpperCamelCase : Union[str, Any] = Accelerator()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Tuple = accelerator.prepare(
__a , __a , __a , __a )
accelerator.load_state(__a )
((_UpperCamelCase), (_UpperCamelCase)) : str = model.a.item(), model.b.item()
_UpperCamelCase : Dict = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
_UpperCamelCase : Union[str, Any] = train(2 , __a , __a , __a , __a )
# Save everything
_UpperCamelCase : List[str] = os.path.join(__a , "checkpoint" )
accelerator.save_state(__a )
# Load everything back in and make sure all states work
accelerator.load_state(__a )
test_rands += train(1 , __a , __a , __a , __a )
((_UpperCamelCase), (_UpperCamelCase)) : Any = model.a.item(), model.b.item()
_UpperCamelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_UpperCamelCase : Union[str, Any] = DummyModel()
_UpperCamelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_UpperCamelCase, _UpperCamelCase : Optional[int] = dummy_dataloaders()
_UpperCamelCase : int = ProjectConfiguration(automatic_checkpoint_naming=__a )
# Train baseline
_UpperCamelCase : Optional[int] = Accelerator(project_dir=__a , project_config=__a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = accelerator.prepare(
__a , __a , __a , __a )
# Save initial
accelerator.save_state()
((_UpperCamelCase), (_UpperCamelCase)) : List[Any] = model.a.item(), model.b.item()
_UpperCamelCase : Optional[int] = optimizer.state_dict()
_UpperCamelCase : List[str] = train(3 , __a , __a , __a , __a )
((_UpperCamelCase), (_UpperCamelCase)) : str = model.a.item(), model.b.item()
_UpperCamelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
_UpperCamelCase : Optional[Any] = DummyModel()
_UpperCamelCase : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_UpperCamelCase, _UpperCamelCase : List[str] = dummy_dataloaders()
_UpperCamelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__a )
_UpperCamelCase : Dict = Accelerator(project_dir=__a , project_config=__a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = accelerator.prepare(
__a , __a , __a , __a )
accelerator.load_state(os.path.join(__a , "checkpoints" , "checkpoint_0" ) )
((_UpperCamelCase), (_UpperCamelCase)) : Tuple = model.a.item(), model.b.item()
_UpperCamelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
_UpperCamelCase : List[str] = train(2 , __a , __a , __a , __a )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__a , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , __a , __a , __a , __a )
((_UpperCamelCase), (_UpperCamelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCamelCase : List[str] = optimizer.state_dict()
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : Dict = torch.tensor([1, 2, 3] )
_UpperCamelCase : Dict = torch.tensor([2, 3, 4] )
_UpperCamelCase : int = DummyModel()
_UpperCamelCase : Optional[int] = torch.optim.Adam(net.parameters() )
_UpperCamelCase : Any = Accelerator()
with self.assertRaises(__a ) as ve:
accelerator.register_for_checkpointing(__a , __a , __a , __a )
_UpperCamelCase : Optional[int] = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_UpperCamelCase : Tuple = DummyModel()
_UpperCamelCase : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_UpperCamelCase : Any = torch.optim.lr_scheduler.StepLR(__a , step_size=1 , gamma=0.99 )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = dummy_dataloaders()
_UpperCamelCase : int = ProjectConfiguration(automatic_checkpoint_naming=__a )
# Train baseline
_UpperCamelCase : Tuple = Accelerator(project_dir=__a , project_config=__a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = accelerator.prepare(
__a , __a , __a , __a , __a )
# Save initial
accelerator.save_state()
_UpperCamelCase : Union[str, Any] = scheduler.state_dict()
train(3 , __a , __a , __a , __a , __a )
self.assertNotEqual(__a , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__a , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(__a , scheduler.state_dict() )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_UpperCamelCase : Optional[Any] = DummyModel()
_UpperCamelCase : Dict = ProjectConfiguration(automatic_checkpoint_naming=__a , total_limit=2 )
# Train baseline
_UpperCamelCase : Optional[int] = Accelerator(project_dir=__a , project_config=__a )
_UpperCamelCase : int = accelerator.prepare(__a )
# Save 11 states; with total_limit=2 only the two most recent checkpoints are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__a , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__a , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase__ = "/tmp/accelerate/state_checkpointing"
lowerCamelCase__ = DummyModel()
lowerCamelCase__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
lowerCamelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
lowerCamelCase__ , lowerCamelCase__ = dummy_dataloaders()
lowerCamelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCamelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCamelCase__ = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowerCamelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowerCamelCase__ = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowerCamelCase__ = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 310
|
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
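# Hedged doctest-style examples (the public name `mode` is an assumption; the def
# above is obfuscated):
#   mode([1, 1, 2])  -> [1]
#   mode([1, 2])     -> [1, 2]   (ties are all returned, sorted)
#   mode([])         -> []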
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = "yolos"
def __init__( self : List[Any] , __a : Tuple=768 , __a : Union[str, Any]=12 , __a : int=12 , __a : Any=3072 , __a : Any="gelu" , __a : Optional[Any]=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : Union[str, Any]=1e-1_2 , __a : Dict=[512, 864] , __a : List[Any]=16 , __a : Any=3 , __a : Tuple=True , __a : List[Any]=100 , __a : Tuple=True , __a : List[str]=False , __a : str=1 , __a : Tuple=5 , __a : List[str]=2 , __a : List[str]=5 , __a : List[Any]=2 , __a : Tuple=0.1 , **__a : Union[str, Any] , ) -> str:
super().__init__(**__a )
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : List[str] = attention_probs_dropout_prob
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Tuple = layer_norm_eps
_UpperCamelCase : Optional[Any] = image_size
_UpperCamelCase : Optional[Any] = patch_size
_UpperCamelCase : Union[str, Any] = num_channels
_UpperCamelCase : int = qkv_bias
_UpperCamelCase : Optional[Any] = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : Optional[int] = class_cost
_UpperCamelCase : Tuple = bbox_cost
_UpperCamelCase : List[str] = giou_cost
# Loss coefficients
_UpperCamelCase : int = bbox_loss_coefficient
_UpperCamelCase : Dict = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return 12
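# Hedged note: the ONNX export config above declares dynamic batch/channel/height/
# width axes for "pixel_values", a validation tolerance of 1e-4, and opset 12.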
| 310
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None,
        eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5,
        max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None, passages_path=None,
        use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
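
# Hedged usage sketch (illustrative addition, not part of the library source): how the
# composite config above is typically built from two sub-configs. The checkpoint names
# are illustrative assumptions.
if __name__ == "__main__":
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="exact"
    )
    print(rag_config.n_docs)  # -> 5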
| 310
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "van"
def __init__( self : Any , __a : int=224 , __a : Union[str, Any]=3 , __a : Union[str, Any]=[7, 3, 3, 3] , __a : Any=[4, 2, 2, 2] , __a : Union[str, Any]=[64, 128, 320, 512] , __a : List[str]=[3, 3, 12, 3] , __a : Optional[int]=[8, 8, 4, 4] , __a : Dict="gelu" , __a : Dict=0.02 , __a : Union[str, Any]=1e-6 , __a : Union[str, Any]=1e-2 , __a : List[str]=0.0 , __a : Optional[Any]=0.0 , **__a : List[str] , ) -> str:
super().__init__(**__a )
_UpperCamelCase : str = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = patch_sizes
_UpperCamelCase : Dict = strides
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : List[str] = depths
_UpperCamelCase : List[Any] = mlp_ratios
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : str = layer_norm_eps
_UpperCamelCase : List[str] = layer_scale_init_value
_UpperCamelCase : Optional[int] = drop_path_rate
_UpperCamelCase : Union[str, Any] = dropout_rate
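
# Minimal sketch (illustrative addition): the defaults above reproduce the "van-base"
# layout, so a no-argument instantiation yields four stages.
if __name__ == "__main__":
    config = VanConfig()
    print(config.hidden_sizes)  # -> [64, 128, 320, 512]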
| 350
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
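        # Worked example (illustrative): with the defaults above, image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, hence seq_length = 226.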
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
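
# Hedged sketch (illustrative addition): the same classification the head test above
# verifies, done via the high-level pipeline API with the identical public checkpoint.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
    print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])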
| 310
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
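
# Hedged usage sketch (illustrative addition, not part of the original test file): the
# integration test above reduces to this unconditional-generation pattern; the
# "CompVis/ldm-celebahq-256" checkpoint is the one the test itself uses.
if __name__ == "__main__":
    ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    sample = ldm(generator=torch.manual_seed(0), num_inference_steps=50).images[0]
    sample.save("ldm_sample.png")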
| 351
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
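
# Hedged sketch (illustrative addition): the canonical streaming pattern the tests above
# exercise - run generate() in a background thread and consume decoded text as it arrives.
# "distilgpt2" and the prompt are illustrative choices.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
    model = AutoModelForCausalLM.from_pretrained("distilgpt2")
    inputs = tokenizer(["A short story:"], return_tensors="pt")

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    for new_text in streamer:
        print(new_text, end="", flush=True)
    thread.join()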
| 310
| 0
|
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    """Find the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has more than one out-edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
lowerCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
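
# Expected output for the graph above (illustrative note): the articulation points are
# 2, 3 and 5 - removing 2 separates {0, 1}, {3, 4} and {5, 6, 7, 8}; removing 3
# isolates 4; removing 5 cuts the 6-7-8 cycle off from the rest of the graph.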
| 352
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def load_original_entity_vocab(entity_vocab_path):
    """Load the original JSON-lines entity vocabulary into a flat {name: id} mapping."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id

    return new_mapping
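
# Illustrative note (hypothetical data): for a JSON-lines entry such as
# {"id": 3, "entities": [["Tokyo", "en"], ["東京", "ja"]]}, the mapping above yields
# {"en:Tokyo": 3, "ja:東京": 3}; special tokens like "[MASK]" are mapped under their bare name.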
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 353
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
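
# Hedged sketch (illustrative addition): the metric is a thin wrapper around NLTK, so a
# score can be reproduced directly with `gleu_score.corpus_gleu`; the token lists are toy data.
if __name__ == "__main__":
    hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
    reference = ["the", "cat", "is", "on", "the", "mat"]
    print(gleu_score.corpus_gleu(list_of_references=[[reference]], hypotheses=[hypothesis]))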
| 310
| 0
|
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube."""
    # Round the float cube root to the nearest integer before cubing it back,
    # because `n ** (1 / 3)` is not exact for every cube.
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
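
# Why the rounding above matters (illustrative): float cube roots are inexact, e.g.
# 64 ** (1 / 3) evaluates to 3.9999999999999996 on IEEE-754 doubles, so cubing the
# unrounded root and comparing with == would wrongly report 64 as not a perfect cube.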
| 354
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate the missing one of inductance, frequency or inductive reactance;
    exactly one of the three arguments must be passed as 0.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
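
    # Worked example (illustrative addition): solving for the missing inductance with
    # f = 10 kHz and X_L = 50 ohm gives L = X_L / (2 * pi * f) ~= 7.96e-4 H.
    print(ind_reactance(0, 10_000, 50))  # -> {'inductance': 0.000795774715...}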
| 310
| 0
|
"""simple docstring"""
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 355
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from `dataset_path`, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Whether `fs` points to a remote filesystem (anything but the local `file` protocol)."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to fsspec's internal loop and thread, then recreate the lock."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
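
# Hedged sketch (illustrative addition): how the two helpers above are typically used.
if __name__ == "__main__":
    print(extract_path_from_uri("s3://my-bucket/data/train"))  # -> "my-bucket/data/train"
    print(is_remote_filesystem(fsspec.filesystem("file")))  # -> False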
| 310
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
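
# Minimal sketch (illustrative addition): the defaults above correspond to the
# ViT-Base/16 layout.
if __name__ == "__main__":
    config = ViTConfig()
    print(config.hidden_size, config.num_hidden_layers, config.patch_size)  # -> 768 12 16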
| 356
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 357
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310
| 0
|
"""simple docstring"""
import functools
def min_distance_up_bottom( worda: str ,wordb: str ) -> int:
    """simple docstring"""
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa: int ,indexb: int ) -> int:
        # if the first word is exhausted, delete what remains of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word is exhausted, delete what remains of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 ,indexb ) ,1 + min_distance(indexa ,indexb + 1 ) ,diff + min_distance(indexa + 1 ,indexb + 1 ) ,)
    return min_distance(0 ,0 )
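# Usage sketch: the classic pair "intention" -> "execution" needs 5 edits.
#     min_distance_up_bottom("intention", "execution")  # -> 5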
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
    def __init__( self : Any , vocab_size : int = 25_0880 , hidden_size : int = 2560 , num_hidden_layers : int = 36 , num_attention_heads : int = 32 , intermediate_size : int = 1_0240 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 514 , type_vocab_size : int = 1 , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-0_5 , pad_token_id : int = 1 , bos_token_id : int = 0 , eos_token_id : int = 2 , position_embedding_type : str = "absolute" , use_cache : bool = True , classifier_dropout = None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __SCREAMING_SNAKE_CASE ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 310
| 0
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
        # NOTE: without a callable or a `with` block, assertRaisesRegex only
        # builds a context manager and asserts nothing by itself.
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
        self.assertRaisesRegex(ValueError , "Weight can not be negative." )
    def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        self.assertRaisesRegex(ValueError , "Profit can not be negative." )
    def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
        self.assertRaisesRegex(
            ValueError , "The length of profit and weight must be same." )
if __name__ == "__main__":
unittest.main()
| 359
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def __SCREAMING_SNAKE_CASE ( self : Any , model : Union[str, Any] , processor : Optional[int] , examples : str ) -> Optional[Any]:
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def __SCREAMING_SNAKE_CASE ( self : List[str] , object_detector : List[Any] , examples : Union[str, Any] ) -> int:
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
        self.assertGreater(len(outputs ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                } , )
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch , threshold=0.0 )
        self.assertEqual(len(batch ) , len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        "score": ANY(float ),
                        "label": ANY(str ),
                        "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                    } , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
_UpperCamelCase : Any = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
        threshold = 0.99_85
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection" , model=model_id )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
| 310
| 0
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func: str ,a: float | Decimal ,precision: float = 10**-10 ) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find root of log(x) - 1 = 0 (i.e. Euler's number e)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 360
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    '''simple docstring'''
    def __init__( self : Dict , username : str ) -> None:
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self : Tuple ) -> dict:
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self : List[str] ) -> str:
        return self.user_data["username"]
    @property
    def fullname( self : Optional[Any] ) -> str:
        return self.user_data["full_name"]
    @property
    def biography( self : Dict ) -> str:
        return self.user_data["biography"]
    @property
    def email( self : Optional[int] ) -> str:
        return self.user_data["business_email"]
    @property
    def website( self : Any ) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers( self : Union[str, Any] ) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self : List[Any] ) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self : Dict ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self : Optional[Any] ) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self : List[str] ) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private( self : Optional[Any] ) -> bool:
        return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    '''simple docstring'''
    def __init__( self : List[Any] , row : int , column : int , default_value : float = 0 ) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self : Any ) -> str:
        s = F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = F'''%{max_element_length}s'''
        # Make string and return
        def single_line(row_vector : list ) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
    def __repr__( self : Optional[Any] ) -> str:
        return str(self )
    def validate_indicies( self : Optional[int] , loc : tuple[int, int] ) -> bool:
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self : Optional[int] , loc : tuple[int, int] ) -> float:
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self : Tuple , loc : tuple[int, int] , value : float ) -> None:
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self : List[str] , another : Matrix ) -> Matrix:
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self : Union[str, Any] ) -> Matrix:
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self : Union[str, Any] , another : Matrix ) -> Matrix:
        return self + (-another)
    def __mul__( self : str , another : float | Matrix ) -> Matrix:
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )
    def transpose( self : Optional[Any] ) -> Matrix:
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self : int , u : Matrix , v : Matrix ) -> Any:
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
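    # Note: sherman_morrison expects `self` to already hold A^(-1); it returns
    # (A + uv^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).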
# Testing
if __name__ == "__main__":
    def testa() -> None:
        """simple docstring"""
        ainv = Matrix(3 ,3 ,0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 ,1 ,0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 ,1 ,0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'''u is {u}''' )
        print(F'''v is {v}''' )
        print(F'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u ,v )}''' )
def lowercase__ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 361
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
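# Low-pass biquad: -12 dB/octave above the cutoff (RBJ Audio EQ Cookbook coefficients).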
def make_lowpass( frequency: int ,samplerate: int ,q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
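# High-pass biquad: the mirror of the low-pass, passing content above the cutoff.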
def make_highpass( frequency: int ,samplerate: int ,q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
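# Band-pass biquad with constant skirt gain (peak gain equals Q).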
def make_bandpass( frequency: int ,samplerate: int ,q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
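# All-pass biquad: flat magnitude response, phase shift around the centre frequency.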
def make_allpass( frequency: int ,samplerate: int ,q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] ,[b0, b1, b2] )
    return filt
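# Peaking-EQ biquad: boosts or cuts around `frequency` by gain_db decibels.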
def make_peak( frequency: int ,samplerate: int ,gain_db: float ,q_factor: float = 1 / sqrt(2 ) ,) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
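# Low-shelf biquad: applies gain_db below the corner frequency.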
def make_lowshelf( frequency: int ,samplerate: int ,gain_db: float ,q_factor: float = 1 / sqrt(2 ) ,) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
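# High-shelf biquad: applies gain_db above the corner frequency.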
def make_highshelf( frequency: int ,samplerate: int ,gain_db: float ,q_factor: float = 1 / sqrt(2 ) ,) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
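# Minimal usage sketch (IIRFilter.process(sample) is assumed from
# audio_filters.iir_filter): run a few samples through a 1 kHz low-pass
# designed for a 48 kHz sample rate.
#     filt = make_lowpass(1_000, 48_000)
#     out = [filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]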
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext: str ,cipher_alphabet: list[str] | None = None ,frequencies_dict: dict[str, float] | None = None ,case_sensitive: bool = False ,) -> tuple[int, float, str]:
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 ,123 )]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how often they show up)
        frequencies = {
'a': 0.0_8497,
'b': 0.0_1492,
'c': 0.0_2202,
'd': 0.0_4253,
'e': 0.1_1162,
'f': 0.0_2228,
'g': 0.0_2015,
'h': 0.0_6094,
'i': 0.0_7546,
'j': 0.0_0153,
'k': 0.0_1292,
'l': 0.0_4025,
'm': 0.0_2406,
'n': 0.0_6749,
'o': 0.0_7507,
'p': 0.0_1929,
'q': 0.0_0095,
'r': 0.0_7587,
's': 0.0_6327,
't': 0.0_9356,
'u': 0.0_2758,
'v': 0.0_0978,
'w': 0.0_2560,
'x': 0.0_0150,
'y': 0.0_1994,
'z': 0.0_0077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values ,key=chi_squared_statistic_values_sorting_key ,)
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
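# Usage sketch (default English letter frequencies assumed):
#     decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")
# recovers shift 10 and the plaintext "the quick brown fox".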
| 362
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
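# MAPPING translates fairseq parameter names (keys) into their Hugging Face
# counterparts (values); the "*" wildcard is filled in with the matching layer
# index during weight loading below.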
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 310
| 0
|
"""simple docstring"""
def solution( numerator: int = 3 ,denominator: int = 7 ,limit: int = 1_000_000 ) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 ,limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
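# Quick sanity check: for limit = 8 the fraction immediately to the left of
# 3/7 is 2/5, so solution(3, 7, 8) == 2.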
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
| 363
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : int = prime_factors(lowercase_ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 ,arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element(arr: list[float] ) -> list[float]:
    """simple docstring"""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
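# The stack-based variant is O(n): scanning right to left, each element is
# pushed once and popped at most once.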
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 364
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [3_1957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [3_1957, 250, 1345, 9, 10, 4758])
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    '''simple docstring'''

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    '''simple docstring'''

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    '''simple docstring'''

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
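# Illustrative shape sketch (an addition, assuming the de-obfuscated class
# names above): Flax convolutions here use NHWC layout, so FlaxUpsample2D
# doubles the two spatial dimensions while FlaxDownsample2D halves them.
#
#   x = jnp.zeros((1, 32, 32, 4))
#   y, _ = FlaxUpsample2D(out_channels=4).init_with_output(jax.random.PRNGKey(0), x)
#   y.shape  # (1, 64, 64, 4)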
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    '''simple docstring'''

    def __init__(self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
pass
@unittest.skip("Safetensors is not supported by timm." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 366
|
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent) -> bool:
    """simple docstring"""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """simple docstring"""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
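# Note (an illustrative addition): test_graph is the classic CLRS flow
# network; the Ford-Fulkerson loop above pushes a maximum flow of 23 from
# node 0 to node 5, and mincut() reports the saturated edges realizing it.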
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """simple docstring"""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """simple docstring"""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
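# Illustrative note (an addition): make_batched normalizes the three accepted
# layouts to List[List[image]]:
#   make_batched(image)                -> [[image]]          (single frame)
#   make_batched([frame1, frame2])     -> [[frame1, frame2]] (single video)
#   make_batched([[f1, f2], [f3, f4]]) -> unchanged          (batch of videos)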
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """simple docstring"""
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """simple docstring"""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_ckpt(ckp_path):
    """simple docstring"""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    '''simple docstring'''

    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer
    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)
    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)
@classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """simple docstring"""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True) -> str:
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
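# Illustrative examples (an addition, assuming the constant names restored
# above):
#   hf_bucket_url("bert-base-uncased", "pytorch_model.bin")
#     -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"
#   hf_bucket_url("facebook/bart-large", "pytorch_model.bin", use_cdn=False)
#     -> "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/pytorch_model.bin"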
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name,
            )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
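# Illustrative note (an addition): cache entries are content-addressed, e.g.
# url_to_filename("https://example.com/model.bin", etag='"abc"') yields
# "<sha256(url)>.<sha256(etag)>", so a changed ETag produces a fresh file.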
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
_UpperCamelCase : Optional[int] = requests.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def get_image_from_url(url):
    """simple docstring"""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """simple docstring"""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    """simple docstring"""
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0, len(images), batch))
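# Illustrative usage (an addition): chunk() yields fixed-size slices lazily,
# e.g. list(chunk([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]].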
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    '''simple docstring'''

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    '''simple docstring'''

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
def depth_first_search(grid, row, col, visit) -> int:
    """simple docstring"""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
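# Illustrative example (an addition, assuming the de-obfuscated signature
# above): with the center cell blocked, the free border cells form a ring,
# so exactly two simple paths connect the opposite corners.
if __name__ == "__main__":
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(maze, 0, 0, set()))  # 2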
"""simple docstring"""
from typing import Any
def mode(input_list: list[Any]) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
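# Illustrative example (an addition): ties are all returned, sorted.
if __name__ == "__main__":
    print(mode([2, 3, 4, 5, 3, 4, 2]))  # [2, 3, 4]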
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    '''simple docstring'''

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    fx_compatible = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
def check_hidden_states_output(__a : Optional[int] , __a : Tuple , __a : Any ):
_UpperCamelCase : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
_UpperCamelCase : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
_UpperCamelCase : List[str] = outputs.hidden_states
_UpperCamelCase : List[Any] = 5
self.assertEqual(len(lowercase_ ) , lowercase_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_UpperCamelCase : Optional[int] = 2
for i in range(len(lowercase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : int = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Tuple = MobileViTVaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
_UpperCamelCase : Dict = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
lowercase_ )
_UpperCamelCase : List[Any] = self.default_image_processor
_UpperCamelCase : Any = prepare_img()
_UpperCamelCase : Tuple = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
_UpperCamelCase : int = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Dict = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCamelCase : Dict = model.to(lowercase_ )
_UpperCamelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**lowercase_ )
_UpperCamelCase : Optional[int] = outputs.logits
# verify the logits
_UpperCamelCase : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowercase_ )
_UpperCamelCase : Any = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=lowercase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCamelCase : Optional[Any] = model.to(lowercase_ )
_UpperCamelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : str = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**lowercase_ )
_UpperCamelCase : str = outputs.logits.detach().cpu()
_UpperCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(50, 60)] )
_UpperCamelCase : Union[str, Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowercase_ )
_UpperCamelCase : str = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
_UpperCamelCase : Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowercase_ )
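# A minimal, self-contained sketch of the inference path the slow segmentation
# tests above verify. Illustrative only: the checkpoint and fixture image come
# from the tests, while post-processing at the original image size is an
# assumed typical usage rather than something the test suite prescribes.
def _mobilevitv2_segmentation_demo():
    import torch
    from PIL import Image
    from transformers import MobileViTImageProcessor, MobileViTV2ForSemanticSegmentation

    image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # turn the logits into a per-pixel class-index map at the original (height, width)
    segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    return segmentation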
| 371
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowerCamelCase__)
class RagConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None,
        bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None,
        title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300,
        retrieval_vector_size=768, retrieval_batch_size=8,
        dataset="wiki_dpr", dataset_split="train", index_name="compressed",
        index_path=None, passages_path=None, use_dummy_dataset=False,
        reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False,
        use_cache=True, forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
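# A minimal usage sketch for the composite config above. Illustrative only: the
# DPR question-encoder and BART generator checkpoints are common pairings for
# RAG, not something this file mandates.
def _rag_config_demo():
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    # compose the two sub-configs; retrieval knobs are plain keyword arguments
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="exact"
    )
    # to_dict() round-trips the nested sub-configs and the model_type marker
    return rag_config.to_dict()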
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """simple docstring"""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
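# Illustrative sketch (not part of the original module): a tiny input makes the
# two counters concrete. The first loop above computes the first-order entropy
# H1 = -sum(p(c) * log2 p(c)); the second computes H2 over adjacent pairs.
def _entropy_demo() -> None:
    single_counts, pair_counts = analyze_text("abca")
    assert single_counts["a"] == 2  # 'a' at index 0 plus the final character
    assert pair_counts[" a"] == 1  # the leading-space pair added explicitly
    assert pair_counts["ab"] == 1 and pair_counts["bc"] == 1 and pair_counts["ca"] == 1
    calculate_prob("abca")  # prints H1, H2 and their difference, each rounded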
def main() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 350
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True,
        hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )

    # flag names follow the standard ModelTesterMixin conventions
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self) -> None:
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
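# The `interpolate_pos_encoding` flag exercised in the test above is usable
# outside the test suite as well. A minimal sketch (checkpoint and 480px size
# are taken from the test; everything else is assumed typical usage):
def _vit_high_res_demo():
    import torch
    from PIL import Image

    model = ViTModel.from_pretrained("facebook/dino-vits8")
    image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():
        # interpolate the pre-trained position embeddings to the larger patch grid
        outputs = model(pixel_values, interpolate_pos_encoding=True)
    return outputs.last_hidden_state.shape  # (1, num_patches + 1, hidden_size)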
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1],
        is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act,
            num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # flag names follow the standard TFModelTesterMixin conventions
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Union[str, Any] = prepare_img()
_UpperCamelCase : int = image_processor(images=lowercase_ , return_tensors="tf" )
# forward pass
_UpperCamelCase : str = model(**lowercase_ )
# verify the logits
_UpperCamelCase : List[str] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
_UpperCamelCase : List[str] = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1e-4 ) )
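# A compact TF inference sketch matching the integration test above. It is
# illustrative only: it reuses the first archive-list checkpoint exactly as the
# test does, and adds an assumed typical top-1 readout on the logits.
def _tf_resnet_demo():
    import tensorflow as tf
    from transformers import AutoImageProcessor, TFResNetForImageClassification

    image_processor = AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    inputs = image_processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs).logits
    # index of the highest-scoring ImageNet class
    return int(tf.math.argmax(logits, axis=-1)[0])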
| 351
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
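# The consumer pattern the tests above exercise, as it would appear in an
# application. A minimal sketch: the model name and prompt are illustrative,
# not taken from the tests.
def _streaming_generation_demo():
    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
    model = AutoModelForCausalLM.from_pretrained("distilgpt2")
    inputs = tokenizer("Streaming lets you print tokens", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # generation runs in a worker thread while the main thread consumes text chunks
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    for new_text in streamer:
        print(new_text, end="", flush=True)
    thread.join()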
| 310
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
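# A short, self-contained usage sketch of the joint processor verified above.
# Illustrative only: the constant arrays stand in for a real waveform and an
# RGB frame, and loading both components from the same checkpoint mirrors setUp.
def _tvlt_processor_demo():
    from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

    processor = TvltProcessor(
        image_processor=TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base"),
        feature_extractor=TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base"),
    )
    audio = np.ones([12000])  # mono audio samples
    images = np.ones([3, 224, 224])  # a single CHW frame
    inputs = processor(audio=audio, images=images)
    return sorted(inputs.keys())  # audio_mask, audio_values, pixel_mask, pixel_values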
| 352
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def load_original_entity_vocab(entity_vocab_path):
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
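# Illustrative sketch (not part of the conversion script): the entity vocab is a
# JSON-lines file, one entry per entity id; special tokens keep their bare name
# as the key, while regular entities are prefixed with their language code.
def _entity_vocab_demo():
    import tempfile

    lines = [
        '{"id": 0, "entities": [["[MASK]", "en"]]}',
        '{"id": 3, "entities": [["Japan", "en"], ["Japon", "fr"]]}',
    ]
    with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
        f.write("\n".join(lines))
    mapping = load_original_entity_vocab(f.name)
    assert mapping == {"[MASK]": 0, "en:Japan": 3, "fr:Japon": 3}
    return mapping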
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 0
|
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
'''simple docstring'''
    def setUp(self) -> None:
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = AutoModel.from_pretrained(snake_case__ )
for policy in FSDP_AUTO_WRAP_POLICY:
_UpperCamelCase : List[str] = self.dist_env.copy()
_UpperCamelCase : int = policy
if policy == "TRANSFORMER_BASED_WRAP":
_UpperCamelCase : str = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
_UpperCamelCase : List[Any] = '2000'
with mockenv_context(**snake_case__ ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_UpperCamelCase : Any = self.dist_env.copy()
_UpperCamelCase : str = 'TRANSFORMER_BASED_WRAP'
_UpperCamelCase : Optional[Any] = 'T5Layer'
with mockenv_context(**snake_case__ ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                with self.assertRaises(Exception ) as cm:
                    fsdp_plugin.set_auto_wrap_policy(model )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
_UpperCamelCase : List[Any] = self.dist_env.copy()
_UpperCamelCase : Optional[int] = 'SIZE_BASED_WRAP'
_UpperCamelCase : List[str] = '0'
with mockenv_context(**snake_case__ ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_UpperCamelCase : Dict = self.dist_env.copy()
_UpperCamelCase : int = mp_dtype
with mockenv_context(**snake_case__ ):
                accelerator = Accelerator()
if mp_dtype == "fp16":
                    dtype = torch.float16
elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
_UpperCamelCase : Tuple = MixedPrecision(param_dtype=snake_case__ , reduce_dtype=snake_case__ , buffer_dtype=snake_case__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , snake_case__ )
                if mp_dtype == FP16:
self.assertTrue(isinstance(accelerator.scaler , snake_case__ ) )
                elif mp_dtype == BF16:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(snake_case__ )
    def test_cpu_offload( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_UpperCamelCase : List[Any] = self.dist_env.copy()
_UpperCamelCase : Dict = str(snake_case__ ).lower()
with mockenv_context(**snake_case__ ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=snake_case__ ) )
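# Illustrative sketch (not part of the original test suite): the plugin reads
# its configuration from FSDP_* environment variables at construction time, so
# an equivalent manual setup looks like the following. The exact variable names
# below are assumptions inferred from the tests above, not a reference.
def _example_fsdp_plugin_from_env():
    env = dict(
        ACCELERATE_USE_FSDP="true",
        FSDP_SHARDING_STRATEGY="1",  # 1 maps to ShardingStrategy.FULL_SHARD
        FSDP_OFFLOAD_PARAMS="false",
    )
    with mockenv_context(**env ):
        # settings are picked up from the mocked environment
        return FullyShardedDataParallelPlugin()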
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest( TempDirTestCase ):
'''simple docstring'''
    def setUp( self ):
super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            'fsdp_shard_grad_op_transformer_based_wrap',
            'fsdp_full_shard_transformer_based_wrap',
        ]
        self.peak_memory_usage_upper_bound = {
            'multi_gpu_fp16': 3200,
            'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
            'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
    def test_performance( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_performance.py" )
        cmd = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_checkpointing( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
        cmd = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config )
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , "epoch_0" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_peak_memory_usage( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
        cmd = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
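# For reference (assembled from the flags exercised above; paths and bounds are
# illustrative, not prescriptive): one fully expanded launch command produced
# by these tests looks like
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#     --use_fsdp --mixed_precision=fp16 --fsdp_sharding_strategy=1 \
#     --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#     --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#     test_performance.py --output_dir=/tmp/fsdp --performance_lower_bound=0.82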
| 353
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , references: List[List[List[str]]] , predictions: List[List[str]] , min_len: int = 1 , max_len: int = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
| 310
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "image_mean" ) )
        self.assertTrue(hasattr(image_processor , "image_std" ) )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_numpy( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_pytorch( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 354
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float ,frequency: float ,reactance: float ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
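# Worked examples (illustrative, not part of the original module): pass 0 for
# the unknown quantity and the function solves it from X_L = 2 * pi * f * L.
def _example_ind_reactance() -> None:
    # L = X_L / (2*pi*f): 50 ohms at 60 Hz gives roughly 0.1326 H
    print(ind_reactance(inductance=0 ,frequency=60 ,reactance=50 ) )
    # X_L = 2*pi*f*L: 35 mH at 50 Hz gives roughly 10.9956 ohms
    print(ind_reactance(inductance=35e-3 ,frequency=50 ,reactance=0 ) )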
| 310
| 0
|
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    '''simple docstring'''
    backend_name = None
@experimental
def parallel_map(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ) -> Union[str, Any]:
    """simple docstring"""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func )
    return _map_with_joblib(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func )
def _map_with_multiprocessing_pool(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ) -> Optional[int]:
    """simple docstring"""
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index ,mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F'''Error dividing inputs iterable among processes. '''
            F'''Total number of objects {len(iterable )}, '''
            F'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
    logger.info(
        F'''Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc ,initargs=initargs ,initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func ,split_kwds )
    logger.info(F'''Finished {num_proc} processes''' )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F'''Unpacked {len(mapped )} objects''' )
    return mapped
def _map_with_joblib(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ) -> Union[str, Any]:
    """simple docstring"""
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str ) -> str:
    """simple docstring"""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
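# Hedged usage sketch (``map_fn``, ``items`` and ``_single_map_nested`` are
# hypothetical placeholders): outside the context manager, parallel_map splits
# the iterable across a multiprocessing Pool; inside it, joblib dispatches the
# same work to the registered backend.
#
#     with parallel_backend("spark"):
#         mapped = parallel_map(map_fn, items, 16, types, False, None, _single_map_nested)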
| 355
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str ) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs: fsspec.AbstractFileSystem ,src: str ,dst: str ) -> None:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) ,fs._strip_protocol(dst ) )
    else:
        fs.mv(src ,dst ,recursive=True )
def _reset_fsspec_lock() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn ,"reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
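# Illustrative sketch (not part of the original module) of the two small
# helpers above: strip the protocol from a remote URI and detect remoteness.
def _example_filesystem_helpers() -> None:
    assert extract_path_from_uri("s3://my-bucket/data" ) == "my-bucket/data"
    assert extract_path_from_uri("/local/data" ) == "/local/data"
    assert is_remote_filesystem(fsspec.filesystem("file" ) ) is False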
| 310
| 0
|
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str ) -> nn.Module:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
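# Minimal usage sketch (illustrative): map a config string to an activation module.
def _example_get_activation() -> nn.Module:
    return get_activation("gelu" )  # returns nn.GELU()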
| 356
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 0
|
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key ,default=False ):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case ):
    """simple docstring"""
    return unittest.skip("Test was skipped" )(test_case )
def slow(test_case ):
    """simple docstring"""
    return unittest.skipUnless(_run_slow_tests ,"test is slow" )(test_case )
def require_cpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(not torch.cuda.is_available() ,"test requires only a CPU" )(test_case )
def require_cuda(test_case ):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.is_available() ,"test requires a GPU" )(test_case )
def require_xpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_xpu_available() ,"test requires a XPU" )(test_case )
def require_mps(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_mps_available() ,"test requires a `mps` backend support in `torch`" )(test_case )
def require_huggingface_suite(test_case ):
    """simple docstring"""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() ,"test requires the Hugging Face suite" )(test_case )
def require_bnb(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_bnb_available() ,"test requires the bitsandbytes library" )(test_case )
def require_tpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_tpu_available() ,"test requires TPU" )(test_case )
def require_single_gpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.device_count() == 1 ,"test requires a GPU" )(test_case )
def require_single_xpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(torch.xpu.device_count() == 1 ,"test requires a XPU" )(test_case )
def require_multi_gpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.device_count() > 1 ,"test requires multiple GPUs" )(test_case )
def require_multi_xpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(torch.xpu.device_count() > 1 ,"test requires multiple XPUs" )(test_case )
def require_safetensors(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_safetensors_available() ,"test requires safetensors" )(test_case )
def require_deepspeed(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_deepspeed_available() ,"test requires DeepSpeed" )(test_case )
def require_fsdp(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_torch_version(">=" ,"1.12.0" ) ,"test requires torch version >= 1.12.0" )(test_case )
def require_torch_min_version(test_case=None ,version=None ):
    """simple docstring"""
    if test_case is None:
        return partial(require_torch_min_version ,version=version )
    return unittest.skipUnless(is_torch_version(">=" ,version ) ,F'''test requires torch version >= {version}''' )(test_case )
def require_tensorboard(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_tensorboard_available() ,"test requires Tensorboard" )(test_case )
def require_wandb(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_wandb_available() ,"test requires wandb" )(test_case )
def require_comet_ml(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_comet_ml_available() ,"test requires comet_ml" )(test_case )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case ):
    """simple docstring"""
    return unittest.skipUnless(
        _atleast_one_tracker_available ,"test requires at least one tracker to be available and for `comet_ml` to not be installed" ,)(test_case )
class TempDirTestCase( unittest.TestCase ):
'''simple docstring'''
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass( cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp( self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("**/*" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase( unittest.TestCase ):
'''simple docstring'''
    def add_mocks( self , mocks: Union[mock.Mock, List[mock.Mock]] ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor ) -> bool:
    """simple docstring"""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] ,tensor ):
            return False
    return True
class _RunOutput:
'''simple docstring'''
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream ,callback ) -> Optional[int]:
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd ,env=None ,stdin=None ,timeout=None ,quiet=False ,echo=False ) -> _RunOutput:
    """simple docstring"""
    if echo:
        print("\nRunning: " ," ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] ,*cmd[1:] ,stdin=stdin ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=env ,)
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line ,sink ,pipe ,label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label ,line ,file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout ,lambda l : tee(l ,out ,sys.stdout ,label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr ,lambda l : tee(l ,err ,sys.stderr ,label="stderr:" ) ) ),
        ] ,timeout=timeout ,)
    return _RunOutput(await p.wait() ,out ,err )
def execute_subprocess_async(cmd ,env=None ,stdin=None ,timeout=180 ,quiet=False ,echo=True ) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd ,env=env ,stdin=stdin ,timeout=timeout ,quiet=quiet ,echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException( Exception ):
'''simple docstring'''
pass
def run_command(command ,return_stdout=False ):
    """simple docstring"""
    try:
        output = subprocess.check_output(command ,stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output ,"decode" ):
                output = output.decode("utf-8" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
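# Hedged usage sketch (the command is an arbitrary example): run a short
# subprocess through the async runner above and inspect the captured streams.
#
#     result = execute_subprocess_async(["python", "-c", "print('hello')"], env=os.environ.copy())
#     assert result.returncode == 0
#     assert "hello" in result.stdout[0]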
| 357
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310
| 0
|
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str ) -> str:
    """simple docstring"""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError("Must be 10 characters long" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12" )
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31" )
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) ,int(m ) ,int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer." )
    # Response
    response = F'''Your date {date_input}, is a {days[str(f )]}!'''
    return response
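# Worked example (illustrative): for "01-31-2010", January shifts to m=13 with
# y=2009, giving c=20, k=9, t=28, u=5, v=2, x=40, z=75, w=35 and w % 7 == 0,
# which maps to "Sunday".
def _example_zeller() -> None:
    print(zeller("01-31-2010" ) )  # Your date 01-31-2010, is a Sunday!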
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
lowerCamelCase__ = parser.parse_args()
zeller(args.date_input)
| 358
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
    def __init__( self , vocab_size=25_0880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_0240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
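# Minimal usage sketch (hyperparameters are the documented defaults, shown for
# illustration only): build a config and inspect the ONNX input axes.
def _example_xlm_roberta_xl_config() -> None:
    config = XLMRobertaXLConfig(hidden_size=2560 , num_hidden_layers=36 )
    onnx_config = XLMRobertaXLOnnxConfig(config )
    print(onnx_config.inputs )  # OrderedDict of input_ids/attention_mask axes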
| 310
| 0
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase__ = "src/diffusers"
lowerCamelCase__ = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line ,indent ) -> bool:
    """simple docstring"""
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" ,line ) is not None
def find_code_in_diffusers(object_name ) -> str:
    """simple docstring"""
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH ,F'''{module}.py''' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module ,parts[i] )
    if i >= len(parts ):
        raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
    with open(os.path.join(DIFFUSERS_PATH ,F'''{module}.py''' ) ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' ,lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] ,indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
lowerCamelCase__ = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
lowerCamelCase__ = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
lowerCamelCase__ = re.compile(R"<FILL\s+[^>]*>")
def get_indent(code ) -> str:
    """simple docstring"""
    lines = code.split("\n" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r"^(\s*)\S" ,lines[idx] ).groups()[0]
    return ""
def blackify(code ) -> str:
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = F'''class Bla:\n{code}'''
    mode = black.Mode(target_versions={black.TargetVersion.PY37} ,line_length=119 ,preview=True )
    result = black.format_str(code ,mode=mode )
    result, _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent(filename ,overwrite=False ):
    """simple docstring"""
    with open(filename ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line ,indent ) and re.search(F'''^{indent}# End copy''' ,line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = "\n".join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("with" ,"" ).split("," )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1 ,obj2 ,theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() ,obj2.lower() ,theoretical_code )
                    theoretical_code = re.sub(obj1.upper() ,obj2.upper() ,theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(F'''Detected changes, rewriting {filename}.''' )
        with open(filename ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
            f.writelines(lines )
    return diffs
def check_copies(overwrite: bool = False ) -> None:
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH ,"**/*.py" ) ,recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename ,overwrite )
        diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 359
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
'''simple docstring'''
@staticmethod
        def open( *args , **kwargs ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test( self , object_detector , examples ):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
        self.assertGreater(len(outputs ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                } , )
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch , threshold=0.0 )
        self.assertEqual(len(batch ) , len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        "score": ANY(float ),
                        "label": ANY(str ),
                        "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                    } , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
    def test_small_model_pt( self ):
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
_UpperCamelCase : Tuple = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Tuple = 0.99_85
_UpperCamelCase : List[Any] = "facebook/detr-resnet-50"
_UpperCamelCase : List[str] = pipeline("object-detection" , model=__a )
_UpperCamelCase : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
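# Hedged usage sketch (added for illustration, not part of the test suite):
# drives the same pipeline the slow tests above exercise. The checkpoint name
# is taken from those tests; the 0.9 threshold is an assumed, typical value.
if __name__ == "__main__":
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    outputs = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
    for detection in outputs:
        print(detection["label"], detection["score"], detection["box"])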
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase__ = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], lowerCamelCase__, module_spec=__spec__)
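# Hedged usage sketch: the lazy module above defers the real import until first
# attribute access. "vinai/bertweet-base" is the canonical BERTweet checkpoint
# and is an illustrative assumption here, not something this file declares.
#
# from transformers import BertweetTokenizer
# tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base")
# print(tokenizer.tokenize("SC has first two presumptive cases of coronavirus"))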
| 360
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : str = script.contents[0]
_UpperCamelCase : Any = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Optional[Any] = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> dict:
_UpperCamelCase : int = requests.get(self.url , headers=__a ).text
_UpperCamelCase : Union[str, Any] = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCamelCase__ = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], lowerCamelCase__, module_spec=__spec__)
| 361
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : Any = _sin / (2 * q_factor)
_UpperCamelCase : str = (1 - _cos) / 2
_UpperCamelCase : Any = 1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : List[str] = -2 * _cos
_UpperCamelCase : Tuple = 1 - alpha
_UpperCamelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : List[str] = tau * frequency / samplerate
_UpperCamelCase : str = sin(lowercase_ )
_UpperCamelCase : Optional[Any] = cos(lowercase_ )
_UpperCamelCase : Dict = _sin / (2 * q_factor)
_UpperCamelCase : List[Any] = (1 + _cos) / 2
_UpperCamelCase : Optional[int] = -1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : str = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Tuple = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Dict = _sin / 2
_UpperCamelCase : int = 0
_UpperCamelCase : str = -ba
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : Optional[int] = -2 * _cos
_UpperCamelCase : Optional[Any] = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : str = tau * frequency / samplerate
_UpperCamelCase : Optional[Any] = sin(lowercase_ )
_UpperCamelCase : Optional[int] = cos(lowercase_ )
_UpperCamelCase : int = _sin / (2 * q_factor)
_UpperCamelCase : List[str] = 1 - alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : Union[str, Any] = 1 + alpha
_UpperCamelCase : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : int = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : List[Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Optional[int] = 10 ** (gain_db / 40)
_UpperCamelCase : str = 1 + alpha * big_a
_UpperCamelCase : Union[str, Any] = -2 * _cos
_UpperCamelCase : Optional[int] = 1 - alpha * big_a
_UpperCamelCase : int = 1 + alpha / big_a
_UpperCamelCase : Optional[Any] = -2 * _cos
_UpperCamelCase : Any = 1 - alpha / big_a
_UpperCamelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tau * frequency / samplerate
_UpperCamelCase : Any = sin(lowercase_ )
_UpperCamelCase : Union[str, Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40)
_UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : Any = big_a * (pmc + aaa)
_UpperCamelCase : Dict = 2 * big_a * mpc
_UpperCamelCase : str = big_a * (pmc - aaa)
_UpperCamelCase : Dict = ppmc + aaa
_UpperCamelCase : List[Any] = -2 * pmpc
_UpperCamelCase : Dict = ppmc - aaa
_UpperCamelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : Any = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : str = 10 ** (gain_db / 40)
_UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : List[Any] = big_a * (ppmc + aaa)
_UpperCamelCase : Dict = -2 * big_a * pmpc
_UpperCamelCase : Dict = big_a * (ppmc - aaa)
_UpperCamelCase : Optional[Any] = pmc + aaa
_UpperCamelCase : Any = 2 * mpc
_UpperCamelCase : Any = pmc - aaa
_UpperCamelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
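# Hedged usage sketch: in the reference module these seven factories are named
# make_lowpass, make_highpass, make_bandpass, make_allpass, make_peak,
# make_lowshelf and make_highshelf; in this dump they all share the name
# ``lowercase__``, so only the last definition (the high-shelf) would be
# reachable. The sketch also assumes IIRFilter exposes a per-sample
# ``process(sample: float) -> float`` method, as in TheAlgorithms reference
# implementation.
if __name__ == "__main__":
    shelf = lowercase__(440, 48_000, 6.0)  # +6 dB high shelf at 440 Hz (assumed semantics)
    samples = [sin(tau * 100 * n / 48_000) for n in range(48)]
    print([shelf.process(sample) for sample in samples][:4])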
| 310
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCamelCase__ = ["bert-base-uncased", "bert-base-cased"]
lowerCamelCase__ = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class __SCREAMING_SNAKE_CASE ( tf.keras.Model ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Optional[Any] ) -> Any:
super().__init__()
_UpperCamelCase : Tuple = tokenizer
_UpperCamelCase : Tuple = AutoConfig.from_pretrained(__a )
_UpperCamelCase : Optional[int] = TFAutoModel.from_config(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : List[Any] = self.tokenizer(__a )
_UpperCamelCase : List[Any] = self.bert(**__a )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
super().setUp()
_UpperCamelCase : Dict = [
BertTokenizer.from_pretrained(__a ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Tuple = [TFBertTokenizer.from_pretrained(__a ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__a , use_fast_bert_tokenizer=__a )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_UpperCamelCase : Optional[int] = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
_UpperCamelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Tuple = tokenizer(__a , return_tensors="tf" , padding="longest" )
_UpperCamelCase : int = tf_tokenizer(__a )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = tf_tokenizer(self.paired_sentences )
_UpperCamelCase : Dict = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Optional[int] = tf.function(__a )
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Any = tf.constant(__a )
_UpperCamelCase : Tuple = compiled_tokenizer(__a )
_UpperCamelCase : Union[str, Any] = tf_tokenizer(__a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Dict = ModelToSave(tokenizer=__a )
_UpperCamelCase : List[str] = tf.convert_to_tensor(self.test_sentences )
_UpperCamelCase : Any = model(__a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : Union[str, Any] = Path(__a ) / "saved.model"
model.save(__a )
_UpperCamelCase : Union[str, Any] = tf.keras.models.load_model(__a )
_UpperCamelCase : Union[str, Any] = loaded_model(__a )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
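# Hedged usage sketch: TFBertTokenizer tokenizes in-graph, which is what the
# saved-model test above depends on. The tiny checkpoint name comes from the
# constant defined at the top of this file; running this requires
# tensorflow_text to be installed.
if __name__ == "__main__":
    tf_tokenizer = TFBertTokenizer.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
    print(tf_tokenizer(tf.constant(["A short test sentence."]))["input_ids"])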
| 362
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = fairseq_model.state_dict()
_UpperCamelCase : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,hf_model.config.feat_extract_norm == "group" ,)
_UpperCamelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
_UpperCamelCase : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase : Any = True
if "*" in mapped_key:
_UpperCamelCase : Dict = name.split(lowercase_ )[0].split("." )[-2]
_UpperCamelCase : Any = mapped_key.replace("*" ,lowercase_ )
if "weight_g" in name:
_UpperCamelCase : str = "weight_g"
elif "weight_v" in name:
_UpperCamelCase : Any = "weight_v"
elif "weight" in name:
_UpperCamelCase : List[str] = "weight"
elif "bias" in name:
_UpperCamelCase : List[Any] = "bias"
else:
_UpperCamelCase : str = None
set_recursively(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Any = full_name.split("conv_layers." )[-1]
_UpperCamelCase : Optional[Any] = name.split("." )
_UpperCamelCase : Union[str, Any] = int(items[0] )
_UpperCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Dict = SEWConfig()
if is_finetuned:
_UpperCamelCase : Dict = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase : List[Any] = model.cfg
_UpperCamelCase : Any = fs_config.conv_bias
_UpperCamelCase : str = eval(fs_config.conv_feature_layers )
_UpperCamelCase : Any = [x[0] for x in conv_layers]
_UpperCamelCase : List[Any] = [x[1] for x in conv_layers]
_UpperCamelCase : Union[str, Any] = [x[2] for x in conv_layers]
_UpperCamelCase : str = "gelu"
_UpperCamelCase : List[str] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
_UpperCamelCase : Optional[int] = 0.0
_UpperCamelCase : Dict = fs_config.activation_fn.name
_UpperCamelCase : Any = fs_config.encoder_embed_dim
_UpperCamelCase : Optional[Any] = 0.02
_UpperCamelCase : str = fs_config.encoder_ffn_embed_dim
_UpperCamelCase : int = 1e-5
_UpperCamelCase : Optional[int] = fs_config.encoder_layerdrop
_UpperCamelCase : str = fs_config.encoder_attention_heads
_UpperCamelCase : Tuple = fs_config.conv_pos_groups
_UpperCamelCase : List[str] = fs_config.conv_pos
_UpperCamelCase : Optional[int] = len(lowercase_ )
_UpperCamelCase : Union[str, Any] = fs_config.encoder_layers
_UpperCamelCase : Union[str, Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase : List[str] = model.cfg
_UpperCamelCase : List[str] = fs_config.final_dropout
_UpperCamelCase : Optional[Any] = fs_config.layerdrop
_UpperCamelCase : int = fs_config.activation_dropout
_UpperCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase : int = fs_config.attention_dropout
_UpperCamelCase : int = fs_config.dropout_input
_UpperCamelCase : List[Any] = fs_config.dropout
_UpperCamelCase : List[Any] = fs_config.mask_channel_length
_UpperCamelCase : List[str] = fs_config.mask_channel_prob
_UpperCamelCase : Optional[Any] = fs_config.mask_length
_UpperCamelCase : Optional[int] = fs_config.mask_prob
_UpperCamelCase : List[str] = "Wav2Vec2FeatureExtractor"
_UpperCamelCase : Optional[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
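# Illustrative invocation (assuming this script is saved as
# convert_sew_checkpoint.py; all paths are placeholders, not real files):
#
# python convert_sew_checkpoint.py \
#     --checkpoint_path /path/to/sew_checkpoint.pt \
#     --pytorch_dump_folder_path /path/to/output_dir \
#     --dict_path /path/to/dict.ltr.txt \
#     --is_finetuned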
| 310
| 0
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , __a : list[int] ) -> str:
        _UpperCamelCase : str = len(__a )
_UpperCamelCase : Optional[Any] = [0] * len_array
if len_array > 0:
_UpperCamelCase : Any = array[0]
        for i in range(1 , len(__a ) ):
_UpperCamelCase : Dict = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : int , __a : int , __a : int ) -> Optional[Any]:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> Dict:
_UpperCamelCase : Optional[int] = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
            sums.add(sum_item )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
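# Worked example for the prefix-sum technique above (the methods are named
# get_sum and contains_sum in the reference implementation; the obfuscated
# names here prevent calling them directly). For array [1, 2, 3, 4] the prefix
# sums are [1, 3, 6, 10]; the sum over indices 1..3 is prefix[3] - prefix[0]
# = 9, and a contiguous subarray summing to 5 exists (2 + 3).
if __name__ == "__main__":
    arr = [1, 2, 3, 4]
    prefix = [arr[0]]
    for value in arr[1:]:
        prefix.append(prefix[-1] + value)
    print(prefix)  # [1, 3, 6, 10]
    print(prefix[3] - prefix[0])  # range sum over indices 1..3 -> 9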
| 363
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : int = prime_factors(lowercase_ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
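# Worked examples for the Mobius function above (obfuscated here to
# ``lowercase__``): mu(15) = 1 (two distinct primes, square-free),
# mu(30) = -1 (three primes, square-free), and mu(24) = 0 (divisible by 2**2,
# so not square-free).
if __name__ == "__main__":
    for n in (15, 30, 24):
        print(n, lowercase__(n))  # expected: 1, -1, 0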
| 310
| 0
|
"""simple docstring"""
import math
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
if (
        not isinstance(power_factor ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
if (
        not isinstance(power_factor ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
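# Worked example for the two helpers above (real_power and reactive_power in
# the reference implementation): with apparent power 100 VA and power factor
# 0.9, real power is 100 * 0.9 = 90 W and reactive power is
# 100 * sqrt(1 - 0.9**2) ~= 43.589 VAR. The duplicated obfuscated parameter
# names above prevent calling the functions directly in this dump.
if __name__ == "__main__":
    apparent_power, power_factor = 100, 0.9
    print(apparent_power * power_factor)  # 90.0
    print(apparent_power * math.sqrt(1 - power_factor**2))  # ~43.589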
| 364
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
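# Hedged usage sketch mirroring the padding tests above: GPT-2 ships without a
# pad token, so one must be supplied before batch padding works. The "gpt2"
# checkpoint name is the canonical hub id and is an illustrative assumption
# here rather than something these tests use.
if __name__ == "__main__":
    tok = GPTaTokenizer.from_pretrained("gpt2", pad_token="<pad>")
    out = tok(["short", "a much longer input sentence"], padding=True, return_tensors="np")
    print(out["input_ids"].shape, out["attention_mask"])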
| 310
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_UpperCamelCase : List[str] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_UpperCamelCase : Optional[Any] = 4
_UpperCamelCase : Union[str, Any] = 48
_UpperCamelCase : int = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_UpperCamelCase : Tuple = [6, 6, 6, 6]
_UpperCamelCase : Tuple = 60
_UpperCamelCase : Dict = [6, 6, 6, 6]
_UpperCamelCase : Dict = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : Optional[Any] = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_UpperCamelCase : Any = 1
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Tuple = 126
_UpperCamelCase : Optional[Any] = 7
_UpperCamelCase : Any = 255.0
_UpperCamelCase : List[Any] = ""
return config
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
_UpperCamelCase : str = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_UpperCamelCase : List[Any] = name.replace("patch_embed.norm" ,"embeddings.patch_embeddings.layernorm" )
if "layers" in name:
_UpperCamelCase : Any = name.replace("layers" ,"encoder.stages" )
if "residual_group.blocks" in name:
_UpperCamelCase : List[str] = name.replace("residual_group.blocks" ,"layers" )
if "attn.proj" in name:
_UpperCamelCase : Union[str, Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
_UpperCamelCase : Union[str, Any] = name.replace("attn" ,"attention.self" )
if "norm1" in name:
_UpperCamelCase : str = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
_UpperCamelCase : Dict = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
_UpperCamelCase : Tuple = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("mlp.fc2" ,"output.dense" )
if "q_bias" in name:
_UpperCamelCase : int = name.replace("q_bias" ,"query.bias" )
if "k_bias" in name:
_UpperCamelCase : List[Any] = name.replace("k_bias" ,"key.bias" )
if "v_bias" in name:
_UpperCamelCase : Dict = name.replace("v_bias" ,"value.bias" )
if "cpb_mlp" in name:
_UpperCamelCase : List[str] = name.replace("cpb_mlp" ,"continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
_UpperCamelCase : Dict = name.replace("patch_embed.proj" ,"patch_embed.projection" )
if name == "norm.weight":
_UpperCamelCase : Tuple = "layernorm.weight"
if name == "norm.bias":
_UpperCamelCase : Optional[int] = "layernorm.bias"
if "conv_first" in name:
_UpperCamelCase : Tuple = name.replace("conv_first" ,"first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_UpperCamelCase : Union[str, Any] = name.replace("conv_last" ,"final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_UpperCamelCase : Optional[Any] = name.replace("conv_before_upsample.0" ,"conv_before_upsample" )
if "upsample.0" in name:
_UpperCamelCase : List[str] = name.replace("upsample.0" ,"upsample.convolution_0" )
if "upsample.2" in name:
_UpperCamelCase : List[str] = name.replace("upsample.2" ,"upsample.convolution_1" )
_UpperCamelCase : List[Any] = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
_UpperCamelCase : Tuple = name.replace("upsample.0.weight" ,"upsample.conv.weight" )
_UpperCamelCase : List[str] = name.replace("upsample.0.bias" ,"upsample.conv.bias" )
else:
pass
else:
_UpperCamelCase : Optional[int] = "swin2sr." + name
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        _UpperCamelCase : List[Any] = orig_state_dict.pop(key )
if "qkv" in key:
_UpperCamelCase : Tuple = key.split("." )
_UpperCamelCase : Dict = int(key_split[1] )
_UpperCamelCase : Optional[Any] = int(key_split[4] )
_UpperCamelCase : int = config.embed_dim
if "weight" in key:
_UpperCamelCase : Optional[Any] = val[:dim, :]
_UpperCamelCase : Optional[int] = val[dim : dim * 2, :]
_UpperCamelCase : Dict = val[-dim:, :]
else:
_UpperCamelCase : Optional[Any] = val[:dim]
_UpperCamelCase : List[Any] = val[dim : dim * 2]
_UpperCamelCase : Optional[int] = val[-dim:]
pass
else:
_UpperCamelCase : Tuple = val
return orig_state_dict
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
    _UpperCamelCase : Any = get_config(lowercase_ )
    _UpperCamelCase : Tuple = SwinaSRForImageSuperResolution(lowercase_ )
    model.eval()
    _UpperCamelCase : List[Any] = torch.hub.load_state_dict_from_url(lowercase_ ,map_location="cpu" )
    _UpperCamelCase : int = convert_state_dict(lowercase_ ,lowercase_ )
    _UpperCamelCase : Any = model.load_state_dict(lowercase_ ,strict=lowercase_ )
    if len(lowercase_ ) > 0:
        raise ValueError("Missing keys when converting: {}".format(lowercase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'''Unexpected key {key} in state_dict''' )
# verify values
_UpperCamelCase : Tuple = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    _UpperCamelCase : int = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw ).convert("RGB" )
_UpperCamelCase : str = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_UpperCamelCase : Tuple = 126 if "Jpeg" in checkpoint_url else 256
_UpperCamelCase : str = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
    _UpperCamelCase : List[str] = transforms(lowercase_ ).unsqueeze(0 )
    if config.num_channels == 1:
        _UpperCamelCase : Tuple = pixel_values[:, 0, :, :].unsqueeze(1 )
    _UpperCamelCase : int = model(lowercase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_UpperCamelCase : Any = torch.Size([1, 3, 512, 512] )
_UpperCamelCase : str = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_UpperCamelCase : int = torch.Size([1, 3, 1_024, 1_024] )
_UpperCamelCase : Dict = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_UpperCamelCase : Optional[int] = torch.Size([1, 3, 1_024, 1_024] )
_UpperCamelCase : Union[str, Any] = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_UpperCamelCase : Tuple = torch.Size([1, 3, 512, 512] )
_UpperCamelCase : List[str] = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_UpperCamelCase : Dict = torch.Size([1, 3, 1_024, 1_024] )
_UpperCamelCase : Optional[Any] = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,lowercase_ ,atol=1e-3 )
print("Looks ok!" )
_UpperCamelCase : Optional[int] = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
_UpperCamelCase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowercase_ )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub(F'''caidas/{model_name}''' )
processor.push_to_hub(F'''caidas/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
lowerCamelCase__ = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
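# Illustrative invocation (assuming this script is saved as
# convert_swin2sr_checkpoint.py; the URL below is the parser's documented
# default checkpoint):
#
# python convert_swin2sr_checkpoint.py \
#     --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#     --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64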
| 365
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , ToolTesterMixin ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
| 310
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = ConsistencyModelPipeline
SCREAMING_SNAKE_CASE__ :List[str] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ :Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
SCREAMING_SNAKE_CASE__ :Any = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    @property
    def dummy_uncond_unet( self ):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test" , subfolder="test_unet" , )
        return unet
    @property
    def dummy_cond_unet( self ):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
        return unet
    def get_dummy_components( self , class_cond=False ):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["class_labels"] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_onestep( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , seed=0 , get_fixed_latents=False , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        generator = torch.manual_seed(seed )
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs["latents"] = latents
        return inputs
    def get_fixed_latents( self , seed=0 , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
    def test_consistency_model_cd_multistep( self ):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_consistency_model_cd_onestep( self ):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn( self ):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn( self ):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 366
|
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
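# Capacity matrix for the max-flow / min-cut demo below: test_graph[u][v] is the
# capacity of the directed edge u -> v (0 means no edge). In the __main__ example,
# vertex 0 is the source and vertex 5 the sink.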
def bfs( graph ,s ,t ,parent ) -> bool:
    """Breadth-first search over the residual graph: returns True when a path from
    source `s` to sink `t` exists, recording the path in `parent`."""
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph ,source ,sink ) -> list:
    """Ford-Fulkerson with BFS (Edmonds-Karp): repeatedly augment along shortest
    paths, then return the edges that ended up saturated, i.e. the minimum cut."""
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph ,source ,sink ,parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow ,graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 310
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_save_load_pretrained_additional_features( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_feature_extractor( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="np" )
        input_processor = processor(audios=raw_speech , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 367
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos ,(list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
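# Accepted shapes, normalized to a batch of videos (a list of lists of frames):
#   a single image         -> [[image]]
#   a list of frames       -> [frames]
#   a batch of frame lists -> returned unchanged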
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , offset: bool = True , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , offset: bool = True , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 310
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_flatten_dict( self ):
        input_dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
        expected_dict = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
    def test_transpose_numpy( self ):
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
    @require_torch
    def test_transpose_torch( self ):
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def test_transpose_tf( self ):
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def test_transpose_flax( self ):
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ) , np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , np.asarray(transpose(t , axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy( self ):
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.reshape(x , (4, 3) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.reshape(x , (12, 5) ) ) )
    @require_torch
    def test_reshape_torch( self ):
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
    @require_tf
    def test_reshape_tf( self ):
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
    @require_flax
    def test_reshape_flax( self ):
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.asarray(reshape(t , (4, 3) ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.asarray(reshape(t , (12, 5) ) ) ) )
    def test_squeeze_numpy( self ):
        x = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(x ) , np.squeeze(x ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.squeeze(x , axis=2 ) ) )
    @require_torch
    def test_squeeze_torch( self ):
        x = np.random.randn(1 , 3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_tf
    def test_squeeze_tf( self ):
        x = np.random.randn(1 , 3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_flax
    def test_squeeze_flax( self ):
        x = np.random.randn(1 , 3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ) , np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.asarray(squeeze(t , axis=2 ) ) ) )
    def test_expand_dims_numpy( self ):
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.expand_dims(x , axis=1 ) ) )
    @require_torch
    def test_expand_dims_torch( self ):
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_tf
    def test_expand_dims_tf( self ):
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_flax
    def test_expand_dims_flax( self ):
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.asarray(expand_dims(t , axis=1 ) ) ) )
| 368
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels( objs=OBJECTS ,attrs=ATTRIBUTES ):
    """Read the Visual Genome class and attribute label files into two lists."""
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint( ckp_path ):
    """Load a pickled detectron-style checkpoint, converting numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path ,"rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v ,np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v ,torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
    '''simple docstring'''
    _pointer = {}
    def __init__( self , dictionary: dict , name: str = "root" , level=0 ):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
    def __repr__( self ):
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ):
        self.__dict__[key] = val
        self.__dict__[key.split("." )[-1]] = val
        levels = key.split("." )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , ".".join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self ):
        return self._pointer
    def dump_yaml( self , data , file_name ):
        with open(F'''{file_name}''' , "w" ) as stream:
            dump(data , stream )
    def dump_json( self , data , file_name ):
        with open(F'''{file_name}''' , "w" ) as stream:
            json.dump(data , stream )
    @staticmethod
    def load_yaml( config ):
        with open(config ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ):
        t = "    "
        if self._name != "root":
            r = F'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += F'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += F'''{t * (self._level)}{k}: {v} ({type(v ).__name__})\n'''
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: str , **kwargs ):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
    @classmethod
    def get_config_dict( cls , pretrained_model_name_or_path: str , **kwargs ):
        cache_dir = kwargs.pop("cache_dir" , None )
        force_download = kwargs.pop("force_download" , False )
        resume_download = kwargs.pop("resume_download" , False )
        proxies = kwargs.pop("proxies" , None )
        local_files_only = kwargs.pop("local_files_only" , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(resolved_config_file ), kwargs
def compare( in_tensor ):
    """Compare an in-memory tensor against the reference dumped to dump.pt."""
    out_tensor = torch.load("dump.pt" ,map_location=in_tensor.device )
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape ,n1[0, 0, :5] )
    print(n2.shape ,n2[0, 0, :5] )
    assert np.allclose(n1 ,n2 ,rtol=0.01 ,atol=0.1 ), (
        F'''{sum([1 for x in np.isclose(n1 ,n2 ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(n1.flatten() )*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    """simple docstring"""
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id ,filename ,use_cdn=True ):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return F'''{endpoint}/{model_id}-{filename}'''
    else:
        return F'''{endpoint}/{model_id}/{filename}'''
def http_get( url ,temp_file ,proxies=None ,resume_size=0 ,user_agent=None ,):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent ,dict ):
        ua += "; " + "; ".join("{}/{}".format(k ,v ) for k, v in user_agent.items() )
    elif isinstance(user_agent ,str ):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url ,stream=True ,proxies=proxies ,headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="B" ,unit_scale=True ,total=total ,initial=resume_size ,desc="Downloading" ,)
    for chunk in response.iter_content(chunk_size=1_024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache( url ,cache_dir=None ,force_download=False ,proxies=None ,etag_timeout=10 ,resume_download=False ,user_agent=None ,local_files_only=False ,):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir ,Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir ,exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url ,allow_redirects=True ,proxies=proxies ,timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get("ETag" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url ,etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir ,filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) ,filename + ".*" )
                if not file.endswith(".json" ) and not file.endswith(".lock" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir ,matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path ,"a+b" ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile ,dir=cache_dir ,delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s" ,url ,temp_file.name ,)
            http_get(
                url ,temp_file ,proxies=proxies ,resume_size=resume_size ,user_agent=user_agent ,)
        os.replace(temp_file.name ,cache_path )
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path ,"w" ) as meta_file:
            json.dump(meta ,meta_file )
    return cache_path
def url_to_filename( url ,etag=None ):
    """simple docstring"""
    url_bytes = url.encode("utf-8" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
def cached_path( url_or_filename ,cache_dir=None ,force_download=False ,proxies=None ,resume_download=False ,user_agent=None ,extract_compressed_file=False ,force_extract=False ,local_files_only=False ,):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename ,Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir ,Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,user_agent=user_agent ,local_files_only=local_files_only ,)
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace("." ,"-" ) + "-extracted"
        output_path_extracted = os.path.join(output_dir ,output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted ,ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path ,"r" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path ) )
        return output_path_extracted
    return output_path
def get_data( query ,delim="," ):
    """simple docstring"""
    assert isinstance(query ,str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split("\n" )
        req.close()
    return data
def get_image_from_url( url ):
    """simple docstring"""
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl( url ):
    """Download (if needed) and convert a pickled FRCNN checkpoint into torch tensors."""
    fn = url.split("/" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn ,"rb" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("model" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("running_var" ,"num_batches_tracked" )
            new[k2] = zero
    return new
def get_demo_path():
    """simple docstring"""
    print(F'''{os.path.abspath(os.path.join(PATH ,os.pardir ) )}/demo.ipynb''' )
def img_tensorize( im ,input_format="RGB" ):
    """simple docstring"""
    assert isinstance(im ,str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, F'''could not connect to: {im}'''
    img = cv2.cvtColor(img ,cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk( images ,batch=1 ):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0 ,len(images ) ,batch ))
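# Example: list(chunk([img1, img2, img3], batch=2)) yields [[img1, img2], [img3]].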
| 310
| 0
|
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum( number: int ) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    """Sum all numbers that can be written as the sum of fifth powers of their digits."""
    return sum(
        number
        for number in range(1_000 ,1_000_000 )
        if number == digits_fifth_powers_sum(number ) )
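# Sanity check: 4150 == 4**5 + 1**5 + 5**5 + 0**5, so solution() counts it.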
if __name__ == "__main__":
print(solution())
| 369
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
    def __init__( self , pretrained_name="sayef/fsner-bert-base-uncased" ):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_name , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-0_8 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q_rep , S_rep , T=1 ):
        return self.softmax(T * self.cos(q_rep , S_rep ) )
    def forward( self , W_query , W_supports ):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
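# For each query i, p_starts[i] / p_ends[i] are softmax distributions over the
# support tokens located at entity start / end marker positions.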
| 310
| 0
|
class EditDistance:
    '''Levenshtein edit distance, solved both top-down (memoized) and bottom-up (tabulated).'''
    def __init__( self ):
        self.word1 = ""
        self.word2 = ""
        self.dp = []
    def __min_dist_top_down_dp( self , m: int , n: int ) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
    def min_dist_top_down( self , word1: str , word2: str ):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2 ) )] for _ in range(len(word1 ) )]
        return self.__min_dist_top_down_dp(len(word1 ) - 1 , len(word2 ) - 1 )
    def min_dist_bottom_up( self , word1: str , word2: str ):
        self.word1 = word1
        self.word2 = word2
        m = len(word1 )
        n = len(word2 )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
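    # In the bottom-up table, dp[i][j] is the edit distance between the first i
    # characters of word1 and the first j characters of word2; insert, delete and
    # replace each cost 1.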
if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}""")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 370
|
"""simple docstring"""
from typing import Any
def mode( input_list: list ) -> list[Any]:
    """Return the mode(s) of `input_list`, sorted ascending.

    >>> mode([2, 2, 3])
    [2]
    >>> mode([3, 4, 4, 3])
    [3, 4]
    """
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 0
|
"""simple docstring"""
def manhattan_distance( point_a: list ,point_b: list ) -> float:
    """Return the Manhattan (taxicab) distance between two points.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(a - b ) for a, b in zip(point_a ,point_b ) ) )
def _validate_point( point ) -> None:
    """Raise TypeError/ValueError when `point` is not a non-empty list of numbers."""
    if point:
        if isinstance(point ,list ):
            for item in point:
                if not isinstance(item ,(int, float) ):
                    msg = (
                        "Expected a list of numbers as input, found "
                        F'''{type(item ).__name__}'''
                    )
                    raise TypeError(msg )
        else:
            msg = F'''Expected a list of numbers as input, found {type(point ).__name__}'''
            raise TypeError(msg )
    else:
        raise ValueError("Missing an input" )
def manhattan_distance_one_liner( point_a: list ,point_b: list ) -> float:
    """Same as `manhattan_distance`, written as a single generator expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(x - y ) for x, y in zip(point_a ,point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
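# Quick check of both implementations above: |1 - 2| + |1 - 2| = 2 and
# |1.5 - 3| + |2 - 0| = 3.5.
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1.5, 2], [3, 0]) == 3.5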
| 371
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowerCamelCase__)
class RagConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None,
        pad_token_id=None, eos_token_id=None, decoder_start_token_id=None,
        title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300,
        retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None,
        passages_path=None, use_dummy_dataset=False, reduce_loss=False,
        label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False,
        do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
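# Hedged usage sketch (not part of the original module): composing a RagConfig
# from two sub-configs via the classmethod above. Checkpoint names illustrative.
#
#   from transformers import AutoConfig
#   question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5
#   )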
| 310
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    """simple docstring"""
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21_841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
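# Worked example of the name parsing in get_swinv2_config, using a hypothetical
# checkpoint name whose layout matches the indexing above (illustration only).
_example_name = "swinv2_base_window12to16_192to256_22kto1k_ft"
_parts = _example_name.split("_")
assert _parts[1] == "base"  # -> embed_dim 128, depths (2, 2, 18, 2)
assert int(_parts[2][-2:]) == 16  # "window12to16" -> window_size 16
assert int(_parts[3][-3:]) == 256  # "192to256" -> img_size 256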
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
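# Example of the key renaming above on an illustrative timm state-dict key:
# "layers" gains the "encoder." prefix, "attn.proj" becomes
# "attention.output.dense", and non-head keys gain the "swinv2." prefix.
assert rename_key("layers.0.blocks.0.attn.proj.weight") == (
    "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight"
)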
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self."
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    """simple docstring"""
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase__ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 350
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    '''simple docstring'''
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
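# Worked check of the sequence-length arithmetic used by ViTModelTester above:
# with the tester defaults (image_size=30, patch_size=2), the ViT sequence is
# (30 // 2) ** 2 + 1 = 226 tokens (the patches plus the [CLS] token).
assert (30 // 2) ** 2 + 1 == 226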
| 310
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
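# Illustrative behavior of split_text above: with n=3 words per passage, a
# five-word document yields two passages.
assert split_text("a b c d e", n=3) == ["a b c", "d e"]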
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 351
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    '''simple docstring'''
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
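# Minimal usage sketch of TextIteratorStreamer outside a test harness, mirroring
# the pattern exercised above. The model name is illustrative; the sketch is
# guarded so the module stays import-safe.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
    model = AutoModelForCausalLM.from_pretrained("distilgpt2")
    inputs = tokenizer("Hello", return_tensors="pt")

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
    for chunk in streamer:
        print(chunk, end="")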
| 310
| 0
|
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
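# Example of the stats parsing above on a typical pytest summary string.
_failed, _success, _spent = handle_test_results("1 failed, 2 passed in 4.05s ==")
assert (_failed, _success, _spent) == (1, 2, "4.05s")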
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
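# Example of the failure extraction above on a doctest-style short summary
# (hypothetical content): the module line opens an error, the next line is
# recorded as its first failure line.
_report = "_ [doctest] transformers.models.bert _\nSome error message"
assert extract_first_line_failure(_report) == {"transformers.models.bert": "Some error message"}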
class Message:
    '''simple docstring'''

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        self.thread_ts = None

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
        return json.dumps(blocks)
@staticmethod
    def error_out() -> None:
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__SCREAMING_SNAKE_CASE )} ) )
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self) -> None:
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self) -> None:
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
lowerCamelCase__ = get_job_links()
lowerCamelCase__ = retrieve_available_artifacts()
lowerCamelCase__ = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCamelCase__ = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCamelCase__ = github_actions_job_links.get("run_doctests")
lowerCamelCase__ = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
lowerCamelCase__ = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
lowerCamelCase__ = handle_test_results(artifact["stats"])
lowerCamelCase__ = failed
lowerCamelCase__ = success
lowerCamelCase__ = time_spent[1:-1] + """, """
lowerCamelCase__ = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
lowerCamelCase__ = line.replace("FAILED ", "")
lowerCamelCase__ = line.split()[0].replace("\n", "")
if "::" in line:
lowerCamelCase__ = line.split("::")
else:
lowerCamelCase__ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCamelCase__ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCamelCase__ = all_failures[test] if test in all_failures else """N/A"""
lowerCamelCase__ = failure
break
lowerCamelCase__ = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 352
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
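# Illustrative input/output sketch (hypothetical values): the entity vocab is a JSON-lines
# file in which each line looks like {"id": 12, "entities": [["Japan", "en"], ["Japon", "fr"]]};
# the mapping built above keys each alias as "<language>:<entity_name>" (or as the bare name
# for special tokens such as "[MASK]"), e.g. {"en:Japan": 12, "fr:Japon": 12}.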
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 42
class __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = True
@register_to_config
def __init__( self : Union[str, Any] , __a : List[Any] = 3 , __a : List[Any] = 3 , __a : List[Any] = ("DownEncoderBlock2D",) , __a : Optional[Any] = ("UpDecoderBlock2D",) , __a : str = (64,) , __a : Any = 1 , __a : Optional[int] = "silu" , __a : Optional[Any] = 4 , __a : Union[str, Any] = 32 , __a : int = 32 , __a : List[Any] = 0.1_82_15 , ) -> Optional[Any]:
super().__init__()
# pass init params to Encoder
_UpperCamelCase : Union[str, Any] = Encoder(
in_channels=__a , out_channels=__a , down_block_types=__a , block_out_channels=__a , layers_per_block=__a , act_fn=__a , norm_num_groups=__a , double_z=__a , )
# pass init params to Decoder
_UpperCamelCase : Any = Decoder(
in_channels=__a , out_channels=__a , up_block_types=__a , block_out_channels=__a , layers_per_block=__a , norm_num_groups=__a , act_fn=__a , )
_UpperCamelCase : Tuple = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_UpperCamelCase : Any = nn.Convad(__a , __a , 1 )
_UpperCamelCase : Dict = False
_UpperCamelCase : Union[str, Any] = False
# only relevant if vae tiling is enabled
_UpperCamelCase : int = self.config.sample_size
_UpperCamelCase : Union[str, Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_UpperCamelCase : Dict = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_UpperCamelCase : List[Any] = 0.25
def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[int] , __a : str=False ) -> Tuple:
if isinstance(__a , (Encoder, Decoder) ):
_UpperCamelCase : Any = value
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Any = True ) -> List[str]:
_UpperCamelCase : str = use_tiling
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
self.enable_tiling(__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : int = True
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
_UpperCamelCase : List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict[str, AttentionProcessor]:
_UpperCamelCase : Tuple = {}
def fn_recursive_add_processors(__a : List[str] , __a : Tuple , __a : Tuple ):
if hasattr(__a , "set_processor" ):
_UpperCamelCase : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , __a , __a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__a , __a , __a )
return processors
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] ) -> Optional[int]:
_UpperCamelCase : Dict = len(self.attn_processors.keys() )
if isinstance(__a , __a ) and len(__a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(__a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__a : Tuple , __a : Dict , __a : Optional[Any] ):
if hasattr(__a , "set_processor" ):
if not isinstance(__a , __a ):
module.set_processor(__a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , __a , __a )
for name, module in self.named_children():
fn_recursive_attn_processor(__a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Any , __a : Optional[Any] = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__a , return_dict=__a )
if self.use_slicing and x.shape[0] > 1:
_UpperCamelCase : int = [self.encoder(__a ) for x_slice in x.split(1 )]
_UpperCamelCase : int = torch.cat(__a )
else:
_UpperCamelCase : Optional[Any] = self.encoder(__a )
_UpperCamelCase : Dict = self.quant_conv(__a )
_UpperCamelCase : Union[str, Any] = DiagonalGaussianDistribution(__a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[int] , __a : Optional[int] = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__a , return_dict=__a )
_UpperCamelCase : Optional[Any] = self.post_quant_conv(__a )
_UpperCamelCase : Optional[int] = self.decoder(__a )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[int] , __a : List[Any] = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
_UpperCamelCase : List[str] = [self._decode(__a ).sample for z_slice in z.split(1 )]
_UpperCamelCase : List[Any] = torch.cat(__a )
else:
_UpperCamelCase : List[Any] = self._decode(__a ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Any , __a : Union[str, Any] ) -> Any:
_UpperCamelCase : int = min(a.shape[2] , b.shape[2] , __a )
for y in range(__a ):
_UpperCamelCase : Tuple = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[int] , __a : str , __a : Dict ) -> str:
_UpperCamelCase : Optional[Any] = min(a.shape[3] , b.shape[3] , __a )
for x in range(__a ):
_UpperCamelCase : Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
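# For reference, the two blend helpers above do a linear cross-fade over the overlap region:
# at offset t into the blend extent, the incoming tile is weighted t / blend_extent and the
# previous tile 1 - t / blend_extent (row-wise in blend_v, column-wise in blend_h), which
# hides the seams where neighbouring tiles meet.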
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Dict , __a : Optional[Any] = True ) -> AutoencoderKLOutput:
_UpperCamelCase : Union[str, Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_UpperCamelCase : Optional[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
_UpperCamelCase : Tuple = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_UpperCamelCase : List[str] = []
for i in range(0 , x.shape[2] , __a ):
_UpperCamelCase : Optional[Any] = []
for j in range(0 , x.shape[3] , __a ):
_UpperCamelCase : Union[str, Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_UpperCamelCase : List[str] = self.encoder(__a )
_UpperCamelCase : List[str] = self.quant_conv(__a )
row.append(__a )
rows.append(__a )
_UpperCamelCase : Union[str, Any] = []
for i, row in enumerate(__a ):
_UpperCamelCase : int = []
for j, tile in enumerate(__a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_UpperCamelCase : List[Any] = self.blend_v(rows[i - 1][j] , __a , __a )
if j > 0:
_UpperCamelCase : int = self.blend_h(row[j - 1] , __a , __a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__a , dim=3 ) )
_UpperCamelCase : Optional[int] = torch.cat(__a , dim=2 )
_UpperCamelCase : Tuple = DiagonalGaussianDistribution(__a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict , __a : List[Any] = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_UpperCamelCase : List[str] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_UpperCamelCase : Optional[Any] = int(self.tile_sample_min_size * self.tile_overlap_factor )
_UpperCamelCase : Optional[Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_UpperCamelCase : int = []
for i in range(0 , z.shape[2] , __a ):
_UpperCamelCase : Union[str, Any] = []
for j in range(0 , z.shape[3] , __a ):
_UpperCamelCase : int = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_UpperCamelCase : Tuple = self.post_quant_conv(__a )
_UpperCamelCase : int = self.decoder(__a )
row.append(__a )
rows.append(__a )
_UpperCamelCase : Union[str, Any] = []
for i, row in enumerate(__a ):
_UpperCamelCase : Union[str, Any] = []
for j, tile in enumerate(__a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_UpperCamelCase : str = self.blend_v(rows[i - 1][j] , __a , __a )
if j > 0:
_UpperCamelCase : str = self.blend_h(row[j - 1] , __a , __a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__a , dim=3 ) )
_UpperCamelCase : Union[str, Any] = torch.cat(__a , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : int , __a : Optional[int] = False , __a : Any = True , __a : str = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
_UpperCamelCase : int = sample
_UpperCamelCase : Optional[int] = self.encode(__a ).latent_dist
if sample_posterior:
_UpperCamelCase : Dict = posterior.sample(generator=__a )
else:
_UpperCamelCase : Tuple = posterior.mode()
_UpperCamelCase : Optional[Any] = self.decode(__a ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a )
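# End-to-end sketch of the forward pass above: the sample is encoded to a diagonal Gaussian
# posterior, a latent is drawn either stochastically (sample_posterior=True, optionally with
# the provided generator) or deterministically via the distribution's mode, and that latent
# is decoded back to image space.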
| 353
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
| 310
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = StableDiffusionLatentUpscalePipeline
SCREAMING_SNAKE_CASE__ :Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
SCREAMING_SNAKE_CASE__ :Dict = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
SCREAMING_SNAKE_CASE__ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ :Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE__ :str = frozenset([] )
SCREAMING_SNAKE_CASE__ :Optional[int] = True
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
_UpperCamelCase : int = 1
_UpperCamelCase : int = 4
_UpperCamelCase : List[str] = (16, 16)
_UpperCamelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=_lowerCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=_lowerCAmelCase , only_cross_attention=_lowerCAmelCase , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
_UpperCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
_UpperCamelCase : Tuple = EulerDiscreteScheduler(prediction_type="sample" )
_UpperCamelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="quick_gelu" , projection_dim=512 , )
_UpperCamelCase : List[str] = CLIPTextModel(_lowerCAmelCase )
_UpperCamelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase : Any = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] , __a : str=0 ) -> str:
if str(_lowerCAmelCase ).startswith("mps" ):
_UpperCamelCase : List[Any] = torch.manual_seed(_lowerCAmelCase )
else:
_UpperCamelCase : Optional[Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_UpperCamelCase : str = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Optional[Any] = "cpu"
_UpperCamelCase : Optional[int] = self.get_dummy_components()
_UpperCamelCase : int = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_UpperCamelCase : Dict = self.get_dummy_inputs(_lowerCAmelCase )
_UpperCamelCase : List[Any] = pipe(**_lowerCAmelCase ).images
_UpperCamelCase : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
_UpperCamelCase : str = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
_UpperCamelCase : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
super().test_save_load_local(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase : Tuple = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
_UpperCamelCase : int = self.get_dummy_components()
_UpperCamelCase : List[Any] = self.pipeline_class(**_lowerCAmelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_UpperCamelCase : List[str] = self.get_dummy_inputs(_lowerCAmelCase )
_UpperCamelCase : Optional[Any] = 2
_UpperCamelCase : Dict = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# schedulers that do not support sigmas are not usable with this pipeline, so skip them
continue
_UpperCamelCase : str = getattr(_lowerCAmelCase , scheduler_enum.name )
_UpperCamelCase : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
_UpperCamelCase : Dict = pipe(**_lowerCAmelCase )[0]
outputs.append(_lowerCAmelCase )
assert check_same_shape(_lowerCAmelCase )
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = torch.manual_seed(33 )
_UpperCamelCase : List[str] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
_UpperCamelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
_UpperCamelCase : List[str] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
_UpperCamelCase : List[str] = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , output_type="latent" ).images
_UpperCamelCase : Optional[Any] = upscaler(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCAmelCase , output_type="np" , ).images[0]
_UpperCamelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase : List[str] = torch.manual_seed(33 )
_UpperCamelCase : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
_UpperCamelCase : Tuple = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
_UpperCamelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
_UpperCamelCase : Optional[int] = upscaler(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCAmelCase , output_type="np" , ).images[0]
_UpperCamelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5e-2
| 354
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int ) -> Any:
_UpperCamelCase : Dict = {}
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> None:
_UpperCamelCase : Union[str, Any] = {}
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Tuple , __a : Union[str, Any] , __a : Dict ) -> None:
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
_UpperCamelCase : List[Any] = probability
def __SCREAMING_SNAKE_CASE ( self : int ) -> list[str]:
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : str ) -> str:
_UpperCamelCase : Tuple = 0
_UpperCamelCase : Tuple = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, int]:
"""simple docstring"""
_UpperCamelCase : List[str] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_A ,_A ,_A )
_UpperCamelCase : Any = Counter(graph.get_nodes() )
_UpperCamelCase : List[str] = start
for _ in range(_A ):
_UpperCamelCase : int = graph.transition(_A )
visited[node] += 1
return visited
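# Illustrative transitions (hypothetical values): each tuple is (source, destination,
# probability), with the outgoing probabilities of every node summing to 1, e.g.
# [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]. Running the random
# walk above for many steps returns a Counter of visits per node, which approximates the
# chain's stationary distribution.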
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
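# Note on the fallback branch above: older fsspec releases expose no reset_lock(), so the
# module-level event loop, IO thread and lock are cleared by hand instead; either way the
# intent is that a forked child process does not inherit a lock still held by the parent.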
| 310
| 0
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
_UpperCamelCase : Optional[int] = precision
_UpperCamelCase : Tuple = ceil(precision / 14 )
_UpperCamelCase : Dict = 426_880 * Decimal(10_005 ).sqrt()
_UpperCamelCase : List[str] = 1
_UpperCamelCase : Tuple = 13_591_409
_UpperCamelCase : Optional[Any] = Decimal(lowercase_ )
for k in range(1 ,lowercase_ ):
_UpperCamelCase : Dict = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase_ ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
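# The loop above follows the Chudnovsky series (a sketch of the math, for reference):
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                          / ((3k)! * (k!)**3 * 640320**(3*k + 3/2))
# Each term contributes roughly 14 new correct digits, hence the ceil(precision / 14)
# iteration count, and 426880 * sqrt(10005) equals 640320**1.5 / 12, which is why dividing
# that constant by the partial sum yields pi.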
if __name__ == "__main__":
lowerCamelCase__ = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 356
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 0
|
"""simple docstring"""
import numpy as np
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : int = int(np.ceil((x_end - xa) / h ) )
_UpperCamelCase : List[str] = np.zeros((n + 1,) )
_UpperCamelCase : List[str] = ya
_UpperCamelCase : Any = xa
for k in range(lowercase_ ):
_UpperCamelCase : Optional[int] = f(lowercase_ ,y[k] )
_UpperCamelCase : Any = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
_UpperCamelCase : Dict = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
_UpperCamelCase : int = f(x + h ,y[k] + h * ka )
_UpperCamelCase : Union[str, Any] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
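# For reference, each iteration above performs the classical fourth-order Runge-Kutta update
# (the four slope evaluations are computed in this order):
#   k1 = f(x_n, y_n)
#   k2 = f(x_n + h/2, y_n + h*k1/2)
#   k3 = f(x_n + h/2, y_n + h*k2/2)
#   k4 = f(x_n + h,   y_n + h*k3)
#   y_{n+1} = y_n + (h/6) * (k1 + 2*k2 + 2*k3 + k4)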
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310
| 0
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowerCamelCase__ = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in the conversion script (despite not being used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, \"{attribute}\"''' in modeling_source
or F'''getattr(self.config, \"{attribute}\"''' in modeling_source
):
_UpperCamelCase : Union[str, Any] = True
# Deal with multi-line cases
elif (
re.search(
rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"''' ,A__ ,)
is not None
):
_UpperCamelCase : int = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_UpperCamelCase : Optional[int] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_UpperCamelCase : Optional[int] = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
_UpperCamelCase : List[str] = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
_UpperCamelCase : List[Any] = True
if not attribute_used:
_UpperCamelCase : Tuple = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_UpperCamelCase : Optional[Any] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_UpperCamelCase : Tuple = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_UpperCamelCase : Tuple = True
elif attribute.endswith("_token_id" ):
_UpperCamelCase : List[str] = True
# configuration class specific cases
if not case_allowed:
_UpperCamelCase : Any = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ ,[] )
_UpperCamelCase : Tuple = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
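# Hypothetical snippet of the multi-line usage the regex above is meant to catch, i.e. a
# getattr call whose attribute name lands on a later line of a modeling file:
#   rotary_dim = getattr(
#       config, "rotary_dim", None
#   )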
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
_UpperCamelCase : Dict = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
_UpperCamelCase : Optional[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_UpperCamelCase : List[Any] = {}
if len(config_class.attribute_map ) > 0:
_UpperCamelCase : Tuple = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_UpperCamelCase : Union[str, Any] = inspect.getsourcefile(A__ )
_UpperCamelCase : List[Any] = os.path.dirname(A__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_UpperCamelCase : Dict = [os.path.join(A__ ,A__ ) for fn in os.listdir(A__ ) if fn.startswith("modeling_" )]
# Get the source code strings
_UpperCamelCase : int = []
for path in modeling_paths:
if os.path.isfile(A__ ):
with open(A__ ) as fp:
modeling_sources.append(fp.read() )
_UpperCamelCase : Optional[int] = []
for config_param, default_value in zip(A__ ,A__ ):
# `attributes` here is all the variant names for `config_param`
_UpperCamelCase : Optional[Any] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A__ ,A__ ,A__ ,A__ ):
unused_attributes.append(attributes[0] )
return sorted(A__ )
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Tuple = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_UpperCamelCase : Optional[int] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) ,lambda lowercase_ : inspect.isclass(A__ )
and issubclass(A__ ,A__ )
and inspect.getmodule(A__ ) == inspect.getmodule(_config_class ) ,)
]
for config_class in config_classes_in_module:
_UpperCamelCase : Union[str, Any] = check_config_attributes_being_used(A__ )
if len(A__ ) > 0:
_UpperCamelCase : str = unused_attributes
if len(A__ ) > 0:
_UpperCamelCase : int = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(A__ )
if __name__ == "__main__":
check_config_attributes()
| 358
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "xlm-roberta-xl"
def __init__( self : Any , __a : Tuple=25_0880 , __a : Optional[Any]=2560 , __a : List[str]=36 , __a : Any=32 , __a : Dict=1_0240 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Tuple=0.1 , __a : str=514 , __a : Any=1 , __a : List[Any]=0.02 , __a : List[str]=1e-0_5 , __a : Optional[Any]=1 , __a : List[Any]=0 , __a : Tuple=2 , __a : int="absolute" , __a : Dict=True , __a : Dict=None , **__a : Tuple , ) -> str:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Union[str, Any] = use_cache
_UpperCamelCase : Optional[Any] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
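# A short note on the mapping above: the integer keys name the input axes that stay dynamic
# at ONNX export time (batch, optional choice, and sequence), so the exported graph accepts
# variable batch sizes and sequence lengths.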
| 310
| 0
|
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __a : Tuple ) -> List[Any]:
if isinstance(__a , __a ):
# Don't modify the user's data in case they want to reuse it (e.g. in tests), because once we've
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_UpperCamelCase : Dict = deepcopy(__a )
elif os.path.exists(__a ):
with io.open(__a , "r" , encoding="utf-8" ) as f:
_UpperCamelCase : Any = json.load(__a )
else:
try:
_UpperCamelCase : Optional[int] = baseaa.urlsafe_baadecode(__a ).decode("utf-8" )
_UpperCamelCase : str = json.loads(__a )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
_UpperCamelCase : str = config
self.set_stage_and_offload()
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
# zero stage - this is resolved as early as possible, before the model is created, so that
# ``is_deepspeed_zero3_enabled`` can be queried and the early deepspeed config object can be
# reached during ``zero.Init()``, which needs to know the dtype and some other hparams.
_UpperCamelCase : Tuple = self.get_value("zero_optimization.stage" , -1 )
# offload
_UpperCamelCase : List[str] = False
if self.is_zeroa() or self.is_zeroa():
_UpperCamelCase : Any = set(["cpu", "nvme"] )
_UpperCamelCase : Union[str, Any] = set(
[
self.get_value("zero_optimization.offload_optimizer.device" ),
self.get_value("zero_optimization.offload_param.device" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
_UpperCamelCase : Tuple = True
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[int] ) -> Dict:
_UpperCamelCase : Optional[int] = self.config
# find the config node of interest if it exists
_UpperCamelCase : Tuple = ds_key_long.split("." )
_UpperCamelCase : int = nodes.pop()
for node in nodes:
_UpperCamelCase : int = config.get(__a )
if config is None:
return None, ds_key
return config, ds_key
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any]=None ) -> int:
_UpperCamelCase : Dict = self.find_config_node(__a )
if config is None:
return default
return config.get(__a , __a )
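# Illustrative lookup (hypothetical config): given
#   {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
# get_value("zero_optimization.offload_param.device") walks the dotted path node by node and
# returns "cpu", while a missing intermediate key makes it fall back to the supplied default.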
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] , __a : Tuple=False ) -> Any:
_UpperCamelCase : str = self.config
# find the config node of interest if it exists
_UpperCamelCase : Optional[int] = ds_key_long.split("." )
for node in nodes:
_UpperCamelCase : str = config
_UpperCamelCase : Optional[int] = config.get(__a )
if config is None:
if must_exist:
raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = self.get_value(__a )
return False if value is None else bool(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Dict ) -> List[str]:
_UpperCamelCase : str = self.get_value(__a )
return False if value is None else not bool(__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
return self._stage == 2
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
return self._stage == 3
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return self._offload
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , __a : Dict ) -> List[str]:
_UpperCamelCase : str = engine
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Dict , **__a : str ) -> int:
# runs backpropagation and handles mixed precision
self.engine.backward(__a , **__a )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases, thus enabling a simple
# training loop that works transparently under many training regimes.
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : List[str] , __a : Dict ) -> Any:
super().__init__(__a , device_placement=__a , scaler=__a )
_UpperCamelCase : str = hasattr(self.optimizer , "overflow" )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[str]=None ) -> Tuple:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : List[str] , __a : int , __a : Tuple ) -> Tuple:
super().__init__(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , __a : str , __a : Optional[int]=0.0_01 , __a : List[str]=0 , **__a : List[str] ) -> str:
_UpperCamelCase : str = params
_UpperCamelCase : List[Any] = lr
_UpperCamelCase : Optional[int] = weight_decay
_UpperCamelCase : Dict = kwargs
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , __a : Dict , __a : Optional[int]=None , __a : Union[str, Any]=0 , **__a : List[str] ) -> Tuple:
_UpperCamelCase : Optional[int] = optimizer
_UpperCamelCase : List[str] = total_num_steps
_UpperCamelCase : str = warmup_num_steps
_UpperCamelCase : str = kwargs
| 359
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Any = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
_UpperCamelCase : str = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCamelCase : List[Any] = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_UpperCamelCase : List[Any] = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
    def test_small_model_pt( self ):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )

        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ] , )

        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] , threshold=0.0 , )

        self.assertEqual(
            nested_simplify(batch_outputs , decimals=4 ) , [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ] , )
@require_torch
@slow
    def test_large_model_pt( self ):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] , )

        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(batch_outputs , decimals=4 ) , [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ] , )
@require_torch
@slow
    def test_integration_torch_object_detection( self ):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection" , model=model_id )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] , )

        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(batch_outputs , decimals=4 ) , [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ] , )
@require_torch
@slow
    def test_threshold( self ):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection" , model=model_id )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm( self ):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection" , model=model_id , threshold=threshold )
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] , )
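# --- Hedged usage sketch (not part of the test file) ---
# The same pipeline can be driven directly; checkpoint and threshold are taken from
# the tests above, everything else is illustrative.
#   from transformers import pipeline
#   detector = pipeline("object-detection" , model="facebook/detr-resnet-50" )
#   print(detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.9985 ) )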
| 310
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4 ) )
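# --- Hedged usage sketch mirroring the integration test above (illustrative only) ---
#   model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
#   logits = model(torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) )[0]
#   print(logits.shape )  # torch.Size([1, 3]) -- one score per MNLI class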
| 360
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def extract_user_profile( script ) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    '''simple docstring'''

    def __init__( self , username : str ) -> None:
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()

    def get_json( self ) -> dict:
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ) -> str:
        return self.user_data["username"]

    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]

    @property
    def biography( self ) -> str:
        return self.user_data["biography"]

    @property
    def email( self ) -> str:
        return self.user_data["business_email"]

    @property
    def website( self ) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )

            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) -> None:
        # Assumption carried over from the slow RoBERTa tokenizer: the mask token
        # absorbs the preceding space (lstrip=True, rstrip=False).
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
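# --- Hedged usage sketch (not part of the original file) ---
# RoBERTa has no segment embeddings, so create_token_type_ids_from_sequences returns
# zeros for single sequences and pairs alike; assumes network access to "roberta-base".
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base" )
#   print(tok.create_token_type_ids_from_sequences([0, 1, 2] ) )  # [0, 0, 0, 0, 0]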
| 361
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
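# --- Hedged usage sketch (not part of the original module) ---
# Assumes IIRFilter exposes a per-sample process() method, as in the companion
# audio_filters.iir_filter module; the frequencies below are illustrative.
if __name__ == "__main__":
    lowpass = make_lowpass(1000 , 48000 )
    impulse = [1.0] + [0.0] * 15
    # First taps of the low-pass impulse response
    print([round(lowpass.process(sample ) , 6 ) for sample in impulse] )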
| 310
| 0
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase__ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester( unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("test-model-flax" , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F'''{key} not identical''' )

        # Reset repo
        delete_repo(token=self._token , repo_id="test-model-flax" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id="test-model-flax" , push_to_hub=True , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F'''{key} not identical''' )
    def test_push_to_hub_in_organization( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F'''{key} not identical''' )

        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id="valid_org/test-model-flax-org" , push_to_hub=True , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F'''{key} not identical''' )
def check_models_equal( model_a , model_b ):
    """simple docstring"""
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )

    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            models_are_equal = False

    return models_are_equal
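# --- Hedged usage sketch (illustrative only) ---
# A model compared against itself is trivially equal:
#   config = BertConfig(vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
#   model = FlaxBertModel(config )
#   assert check_models_equal(model , model )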
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
_UpperCamelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_UpperCamelCase : List[str] = FlaxBertModel(_a )
_UpperCamelCase : Optional[int] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
_UpperCamelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_a )
_UpperCamelCase : List[Any] = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_UpperCamelCase : Union[str, Any] = FlaxBertModel(_a )
_UpperCamelCase : List[str] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size="10KB" )
with self.assertRaises(_a ):
_UpperCamelCase : Dict = FlaxBertModel.from_pretrained(_a )
_UpperCamelCase : Any = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = """bert"""
_UpperCamelCase : Dict = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(_a ):
_UpperCamelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_a )
_UpperCamelCase : int = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
_UpperCamelCase : Optional[int] = """bert"""
_UpperCamelCase : Any = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(_a ):
_UpperCamelCase : Dict = FlaxBertModel.from_pretrained(_a )
_UpperCamelCase : List[Any] = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 362
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ):
    """simple docstring"""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """simple docstring"""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )

    recursively_load_weights(model , hf_model , is_finetuned )

    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
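# --- Hedged usage sketch (not part of the original script) ---
# Only the flags defined by the parser above are used; the script name and file
# paths below are illustrative assumptions.
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_finetuned.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --is_finetuned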
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=512 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )

        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )

        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelBaseModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForPreTraining(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class __SCREAMING_SNAKE_CASE ( TFModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_base_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 363
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int ) -> int:
    """simple docstring"""
    factors = prime_factors(n )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
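# Illustrative values (assuming the standard semantics of prime_factors and is_square_free):
# mobius(7) == -1 (one prime factor), mobius(10) == 1 (two distinct primes),
# mobius(12) == 0 (12 = 2**2 * 3 is not square-free)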
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 0
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def _dump_articles(path , articles ):
    """simple docstring"""
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
def make_test_data_dir(tmp_dir ):
    """simple docstring"""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class __SCREAMING_SNAKE_CASE ( TestCasePlus ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self , tok_name ):
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang , tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path="train" , max_source_length=max_src_len , max_target_length=max_tgt_len , src_lang=src_lang , tgt_lang=tgt_lang , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(batch , dict )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self , tok_name ):
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path="train" , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self ):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        orig_examples = tmp_dir.joinpath("train.source" ).open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(tokenizer , tmp_dir , 128 , save_dir )
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source" ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples ) < len(orig_examples )
        assert len(packed_examples ) == 1
        assert len(packed_examples[0] ) == sum(len(x ) for x in orig_examples )
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
    def test_dynamic_batch_size(self ):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64 )
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens , required_batch_size_multiple=required_batch_size_multiple )
        batch_sizes = [len(x ) for x in batch_sampler]
        assert len(set(batch_sizes ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes ) == len(ds )  # no dropped or added examples
        data_loader = DataLoader(ds , batch_sampler=batch_sampler , collate_fn=ds.collate_fn , num_workers=2 )
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape )
            num_src_per_batch.append(num_src_tokens )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens )
        assert num_src_per_batch[0] == max(num_src_per_batch )
        if failures:
            raise AssertionError(F'''too many tokens in {len(failures )} batches''' )
    def test_sortish_sampler_reduces_padding(self ):
        ds, _, tokenizer = self._get_dataset(max_len=512 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k="input_ids" ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k="labels" ) ) < sum(count_pad_tokens(naive_dl , k="labels" ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset(self , n_obs=1000 , max_len=128 ):
        if os.getenv("USE_REAL_DATA" , False ):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir ).joinpath("train.len" ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = SeqaSeqDataset(
            tokenizer , data_dir=data_dir , type_path="train" , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self ):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=0 , add_extra_examples=False ) )
        ids2 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=1 , add_extra_examples=False ) )
        assert ids1.intersection(ids2 ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self , tok_name ):
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False )
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs ) == 1 if tok_name == BART_TINY else len(kwargs ) == 0
| 364
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self ):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
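        # With this toy vocab and merge list, " lower" is expected to tokenize as
        # ["\u0120low", "er"], which is exactly what test_full_tokenizer below asserts.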
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs(self , *args , **kwargs ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                sa = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                pa = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding="max_length" , )
    def test_padding_if_pad_token_set_slow(self ):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding="max_length" , max_length=30 , return_tensors="np" )
        out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors="np" )
        out_p = tokenizer(*p , padding="max_length" , max_length=60 , return_tensors="np" )
        out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
    def test_add_bos_token_slow(self ):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = "This is a simple input"
        sa = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(sa )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    def test_padding_different_model_input_name(self ):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self ):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False , add_bos_token=True )]
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0 , add_special_tokens=False )
                encoded_sequence += tokenizer.encode(sequence_1 , add_special_tokens=False )
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0 , sequence_1 , add_special_tokens=True , return_special_tokens_mask=True , )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask ) , len(encoded_sequence_w_special ) )
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special )
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence , filtered_sequence )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_serialize_deserialize_fast_opt(self ):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=True )
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text , )
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("test_opt" )
        tokenizer = AutoTokenizer.from_pretrained("./test_opt" )
        input_ids = tokenizer.encode(
            text , )
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
    def test_fast_slow_equivalence(self ):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=True )
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text , )
        # Same as above
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def test_users_can_modify_bos(self ):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=True )
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text , )
        # We changed the bos token
        self.assertEqual(input_ids , [31957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("./tok" )
        tokenizer = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        input_ids = tokenizer.encode(
            text , )
        self.assertEqual(input_ids , [31957, 250, 1345, 9, 10, 4758] )
| 310
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowerCamelCase__ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : str = 1
_UpperCamelCase : Any = len(self.sp_model )
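        # A concrete instance of the alignment table above (an assumption based on that table,
        # not asserted anywhere in this file): spm assigns "an" id 3, and
        # 3 + self.fairseq_offset == 4, which is exactly where fairseq places "an".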
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size(self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs(self , raw_inputs , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize(self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch(self , src_texts: List[str] , src_lang: str = "eng_Latn" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode(self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode(self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self , lang: str ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 365
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = load_tool("text-question-answering" )
self.tool.setup()
_UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
self.assertEqual(__a , "launched the BigScience Research Workshop" )
| 310
| 0
|
"""simple docstring"""
def euclidean_distance_sqr(point1 , point2 ):
    """simple docstring"""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort(array , column=0 ):
    """simple docstring"""
    return sorted(array , key=lambda x: x[column] )
def dis_between_closest_pair(points , points_counts , min_dis=float("inf" ) ):
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points , points_counts , min_dis=float("inf" ) ):
    """simple docstring"""
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x , points_sorted_on_y , points_counts ):
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points(points , points_counts ):
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 366
|
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph , s , t , parent ):
    """simple docstring"""
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph , source , sink ):
    """simple docstring"""
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(temp ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
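    # For the classic CLRS example network above the maximum flow is 23, and the cut edges
    # reported are expected to be [(1, 3), (4, 3), (4, 5)] (capacities 12 + 7 + 4 = 23).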
| 310
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a , b ):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto , name , new_name ):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto , name , new_name ):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model , model_without_ext , ind_to_replace ):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path ):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                # ONNX TensorProto data types: 1 = FLOAT and 6 = INT32 (4 bytes each),
                # 7 = INT64 and 11 = DOUBLE (8 bytes each)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
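# Usage sketch (the path is hypothetical): remove_dup_initializers("models/model.onnx")
# writes "models/optimized_model.onnx" next to the input file and returns that path.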
| 367
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def make_batched(videos ) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
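# In other words: a single frame becomes [[frame]], a flat list of frames becomes
# [frames], and an already-batched list of lists of frames passes through unchanged.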
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , offset: bool = True , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" in size:
_UpperCamelCase : str = get_resize_output_image_size(__a , size["shortest_edge"] , default_to_square=__a )
elif "height" in size and "width" in size:
_UpperCamelCase : Any = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : bool = True , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> Optional[Any]:
_UpperCamelCase : Any = image.astype(np.floataa )
if offset:
_UpperCamelCase : Dict = image - (scale / 2)
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Optional[Any] = to_numpy_array(__a )
if do_resize:
_UpperCamelCase : Any = self.resize(image=__a , size=__a , resample=__a )
if do_center_crop:
_UpperCamelCase : Dict = self.center_crop(__a , size=__a )
if do_rescale:
_UpperCamelCase : Union[str, Any] = self.rescale(image=__a , scale=__a , offset=__a )
if do_normalize:
_UpperCamelCase : int = self.normalize(image=__a , mean=__a , std=__a )
_UpperCamelCase : str = to_channel_dimension_format(__a , __a )
return image
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
_UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = offset if offset is not None else self.offset
_UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(__a , param_name="crop_size" )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCamelCase : Union[str, Any] = make_batched(__a )
_UpperCamelCase : Optional[Any] = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , offset=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase : List[Any] = {"pixel_values": videos}
return BatchFeature(data=__a , tensor_type=__a )
| 310
| 0
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
def run_func(lowercase_ ):
@wraps(lowercase_ )
def run_in_eager_mode(*lowercase_ ,**lowercase_ ):
return func(*lowercase_ ,**lowercase_ )
@wraps(lowercase_ )
@tf.function(experimental_compile=lowercase_ )
def run_in_graph_mode(*lowercase_ ,**lowercase_ ):
return func(*lowercase_ ,**lowercase_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
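# Dispatch sketch for the factory above (used later in this file as
# `run_with_tf_optimizations(do_eager_mode, use_xla)`): with eager mode on, the
# wrapped callable runs undecorated; otherwise it is compiled via `tf.function`,
# optionally with XLA. Hypothetical use:
#
#     @run_with_tf_optimizations(False, True)
#     def forward():
#         return model(input_ids, training=False)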
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> ["tf.Tensor"]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = random.Random()
_UpperCamelCase : Any = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowercase_ ,shape=(batch_size, sequence_length) ,dtype=tf.intaa )
class __SCREAMING_SNAKE_CASE ( Benchmark ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :TensorFlowBenchmarkArguments
SCREAMING_SNAKE_CASE__ :PretrainedConfig
SCREAMING_SNAKE_CASE__ :str = "TensorFlow"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
return tf.__version__
def __SCREAMING_SNAKE_CASE ( self : int , __a : str , __a : int , __a : int ) -> List[str]:
# initialize GPU on separate process
_UpperCamelCase : Optional[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _UpperCamelCase : Optional[Any] = self._prepare_inference_func(__a , __a , __a )
        return self._measure_speed(_inference )
    def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : int , __a : int ) -> int:
        _UpperCamelCase : Optional[Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _UpperCamelCase : Optional[Any] = self._prepare_train_func(__a , __a , __a )
return self._measure_speed(_train )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : str , __a : int , __a : int ) -> Any:
# initialize GPU on separate process
if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __a )
        _UpperCamelCase : Union[str, Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _UpperCamelCase : Any = self._prepare_inference_func(__a , __a , __a )
        return self._measure_memory(_inference )
    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str , __a : int , __a : int ) -> Optional[int]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __a )
        _UpperCamelCase : Union[str, Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        _UpperCamelCase : int = self._prepare_train_func(__a , __a , __a )
return self._measure_memory(_train )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : str , __a : int , __a : int ) -> str:
_UpperCamelCase : Optional[int] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
        _UpperCamelCase : int = (
            hasattr(__a , "architectures" )
            and isinstance(config.architectures , __a )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                _UpperCamelCase : str = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                _UpperCamelCase : Optional[int] = __import__("transformers" , fromlist=[model_class] )
                _UpperCamelCase : List[Any] = getattr(__a , __a )
                _UpperCamelCase : str = model_cls(__a )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            _UpperCamelCase : Any = TF_MODEL_MAPPING[config.__class__](__a )
        # encoder-decoder has vocab size saved differently
        _UpperCamelCase : str = config.vocab_size if hasattr(__a , "vocab_size" ) else config.encoder.vocab_size
        _UpperCamelCase : List[str] = random_input_ids(__a , __a , __a )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(__a , decoder_input_ids=__a , training=__a )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(__a , training=__a )
        _UpperCamelCase : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : str , __a : int , __a : int ) -> List[str]:
_UpperCamelCase : List[str] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
        _UpperCamelCase : List[str] = (
            hasattr(__a , "architectures" )
            and isinstance(config.architectures , __a )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                _UpperCamelCase : str = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                _UpperCamelCase : Dict = __import__("transformers" , fromlist=[model_class] )
                _UpperCamelCase : Any = getattr(__a , __a )
                _UpperCamelCase : str = model_cls(__a )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            _UpperCamelCase : Tuple = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__a )
        # encoder-decoder has vocab size saved differently
        _UpperCamelCase : int = config.vocab_size if hasattr(__a , "vocab_size" ) else config.encoder.vocab_size
        _UpperCamelCase : Tuple = random_input_ids(__a , __a , __a )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            _UpperCamelCase : Optional[Any] = model(__a , decoder_input_ids=__a , labels=__a , training=__a )[0]
            _UpperCamelCase : List[str] = tf.gradients(__a , model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            _UpperCamelCase : Optional[Any] = model(__a , labels=__a , training=__a )[0]
            _UpperCamelCase : int = tf.gradients(__a , model.trainable_variables )
            return gradients
        _UpperCamelCase : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[Any] ) -> Tuple:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
                    timeit.repeat(__a , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                _UpperCamelCase : Any = timeit.repeat(
                    __a , repeat=self.args.repeat , number=10 , )
                return min(__a ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Callable[[], None] ) -> Union[str, Any]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
_UpperCamelCase : Optional[Any] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
_UpperCamelCase : Dict = """N/A"""
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
_UpperCamelCase : int = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        _UpperCamelCase : Union[str, Any] = nvml.nvmlDeviceGetMemoryInfo(__a )
                        _UpperCamelCase : Optional[Any] = meminfo.used
                        _UpperCamelCase : Dict = Memory(__a )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
_UpperCamelCase : str = None
else:
                        _UpperCamelCase : Dict = measure_peak_memory_cpu(__a )
                        _UpperCamelCase : Dict = Memory(__a ) if isinstance(__a , __a ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    _UpperCamelCase : List[Any] = stop_memory_tracing(__a )
if memory is None:
_UpperCamelCase : Optional[int] = summary.total
else:
_UpperCamelCase : List[Any] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 368
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCamelCase__ = os.path.join(torch_cache_home, "transformers")
lowerCamelCase__ = "https://cdn.huggingface.co"
lowerCamelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCamelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCamelCase__ = os.path.join(PATH, "config.yaml")
lowerCamelCase__ = os.path.join(PATH, "attributes.txt")
lowerCamelCase__ = os.path.join(PATH, "objects.txt")
lowerCamelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCamelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = "pytorch_model.bin"
lowerCamelCase__ = "config.yaml"
def lowercase__ ( lowercase_=OBJECTS ,lowercase_=ATTRIBUTES ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
_UpperCamelCase : Any = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = OrderedDict()
with open(lowercase_ ,"rb" ) as f:
_UpperCamelCase : List[str] = pkl.load(lowercase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCamelCase : List[str] = ckp.pop(lowercase_ )
if isinstance(lowercase_ ,np.ndarray ):
_UpperCamelCase : List[Any] = torch.tensor(lowercase_ )
else:
            assert isinstance(lowercase_ ,torch.Tensor ), type(lowercase_ )
_UpperCamelCase : Optional[Any] = v
return r
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = {}
def __init__( self : str , __a : dict , __a : str = "root" , __a : Any=0 ) -> Any:
_UpperCamelCase : Optional[Any] = name
_UpperCamelCase : Optional[Any] = level
_UpperCamelCase : Union[str, Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCamelCase : Optional[int] = copy.deepcopy(__a )
_UpperCamelCase : Dict = copy.deepcopy(__a )
if isinstance(__a , __a ):
_UpperCamelCase : Union[str, Any] = Config(__a , name=__a , level=level + 1 )
_UpperCamelCase : Optional[Any] = v
setattr(self , __a , __a )
_UpperCamelCase : Optional[Any] = d
def __repr__( self : List[str] ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , __a : Union[str, Any] , __a : Optional[int] ) -> int:
_UpperCamelCase : Any = val
_UpperCamelCase : Optional[Any] = val
_UpperCamelCase : Dict = key.split("." )
_UpperCamelCase : int = len(__a ) - 1
_UpperCamelCase : List[str] = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
_UpperCamelCase : str = val
else:
_UpperCamelCase : List[str] = pointer[l]
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._pointer
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : List[str] ) -> Dict:
with open(F'''{file_name}''' , "w" ) as stream:
dump(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[Any] , __a : Dict ) -> List[Any]:
with open(F'''{file_name}''' , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] ) -> Optional[int]:
with open(__a ) as stream:
_UpperCamelCase : int = load(__a , Loader=__a )
return data
def __str__( self : List[str] ) -> Tuple:
_UpperCamelCase : List[str] = " "
if self._name != "root":
_UpperCamelCase : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
_UpperCamelCase : Any = ""
_UpperCamelCase : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n'''
_UpperCamelCase : Optional[Any] = level
return r[:-1]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , __a : str , **__a : str ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : int = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , __a : str , **__a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Tuple = kwargs.pop("cache_dir" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("force_download" , __a )
_UpperCamelCase : str = kwargs.pop("resume_download" , __a )
_UpperCamelCase : Any = kwargs.pop("proxies" , __a )
_UpperCamelCase : List[Any] = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
_UpperCamelCase : Optional[Any] = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
_UpperCamelCase : Optional[int] = pretrained_model_name_or_path
else:
_UpperCamelCase : int = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
_UpperCamelCase : Optional[int] = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCamelCase : List[Any] = Config.load_yaml(__a )
except EnvironmentError:
_UpperCamelCase : Union[str, Any] = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : str = torch.load("dump.pt" ,map_location=in_tensor.device )
_UpperCamelCase : str = in_tensor.numpy()
_UpperCamelCase : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ), (
F'''{sum([1 for x in np.isclose(lowercase_ ,lowercase_ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = urlparse(lowercase_ )
return parsed.scheme in ("http", "https")
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> str:
"""simple docstring"""
_UpperCamelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_UpperCamelCase : List[str] = "/" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
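# URL-shape sketch for the resolver above (`hf_bucket_url` at its call site): model
# ids without a "/" use the legacy flat layout, namespaced ids use nested paths.
#
#     hf_bucket_url("bert-base-uncased", "pytorch_model.bin", use_cdn=True)
#     # -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"
#     hf_bucket_url("user/model", "pytorch_model.bin", use_cdn=False)
#     # -> "https://s3.amazonaws.com/models.huggingface.co/bert/user/model/pytorch_model.bin"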
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=0 ,lowercase_=None ,) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[int] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase_ ,lowercase_ ):
ua += "; " + "; ".join("{}/{}".format(lowercase_ ,lowercase_ ) for k, v in user_agent.items() )
elif isinstance(lowercase_ ,lowercase_ ):
ua += "; " + user_agent
_UpperCamelCase : Any = {"user-agent": ua}
if resume_size > 0:
_UpperCamelCase : str = "bytes=%d-" % (resume_size,)
_UpperCamelCase : str = requests.get(lowercase_ ,stream=lowercase_ ,proxies=lowercase_ ,headers=lowercase_ )
if response.status_code == 416: # Range not satisfiable
return
_UpperCamelCase : List[str] = response.headers.get("Content-Length" )
_UpperCamelCase : Union[str, Any] = resume_size + int(lowercase_ ) if content_length is not None else None
_UpperCamelCase : Optional[int] = tqdm(
unit="B" ,unit_scale=lowercase_ ,total=lowercase_ ,initial=lowercase_ ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase_ ) )
temp_file.write(lowercase_ )
progress.close()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=10 ,lowercase_=False ,lowercase_=None ,lowercase_=False ,) -> Tuple:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : str = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Dict = str(lowercase_ )
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
_UpperCamelCase : Dict = None
if not local_files_only:
try:
_UpperCamelCase : List[Any] = requests.head(lowercase_ ,allow_redirects=lowercase_ ,proxies=lowercase_ ,timeout=lowercase_ )
if response.status_code == 200:
_UpperCamelCase : str = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCamelCase : int = url_to_filename(lowercase_ ,lowercase_ )
# get cache path to put the file
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
_UpperCamelCase : Optional[int] = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCamelCase : Dict = cache_path + ".lock"
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCamelCase : List[str] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowercase_ ,"a+b" ) as f:
yield f
_UpperCamelCase : Union[str, Any] = _resumable_file_manager
if os.path.exists(lowercase_ ):
_UpperCamelCase : str = os.stat(lowercase_ ).st_size
else:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = partial(tempfile.NamedTemporaryFile ,dir=lowercase_ ,delete=lowercase_ )
_UpperCamelCase : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,lowercase_ ,temp_file.name ,)
http_get(
lowercase_ ,lowercase_ ,proxies=lowercase_ ,resume_size=lowercase_ ,user_agent=lowercase_ ,)
os.replace(temp_file.name ,lowercase_ )
_UpperCamelCase : Optional[int] = {"url": url, "etag": etag}
_UpperCamelCase : List[str] = cache_path + ".json"
with open(lowercase_ ,"w" ) as meta_file:
json.dump(lowercase_ ,lowercase_ )
return cache_path
def lowercase__ ( lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = url.encode("utf-8" )
_UpperCamelCase : List[str] = shaaaa(lowercase_ )
_UpperCamelCase : List[str] = url_hash.hexdigest()
if etag:
_UpperCamelCase : Optional[Any] = etag.encode("utf-8" )
_UpperCamelCase : Optional[Any] = shaaaa(lowercase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
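# Naming sketch for the cache-key function above (`url_to_filename` at its call
# site): the filename is a hash of the url (via the `shaaaa` helper imported above,
# SHA-256 upstream), with "." plus a hash of the ETag appended when one is known,
# and a trailing ".h5" preserved so TensorFlow weights stay loadable. Digests below
# are placeholders, not real values:
#
#     url_to_filename("https://example.com/weights.h5", etag='"abc"')
#     # -> "<hash-of-url>.<hash-of-etag>.h5"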
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=None ,lowercase_=False ,lowercase_=False ,lowercase_=False ,) -> str:
"""simple docstring"""
if cache_dir is None:
_UpperCamelCase : List[Any] = TRANSFORMERS_CACHE
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
_UpperCamelCase : Union[str, Any] = get_from_cache(
lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,proxies=lowercase_ ,resume_download=lowercase_ ,user_agent=lowercase_ ,local_files_only=lowercase_ ,)
elif os.path.exists(lowercase_ ):
# File, and it exists.
_UpperCamelCase : List[str] = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(lowercase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCamelCase, _UpperCamelCase : Any = os.path.split(lowercase_ )
_UpperCamelCase : Optional[int] = output_file.replace("." ,"-" ) + "-extracted"
_UpperCamelCase : Any = os.path.join(lowercase_ ,lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCamelCase : Optional[int] = output_path + ".lock"
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ ,ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ ,"r" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
_UpperCamelCase : int = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(lowercase_ ) )
return output_path_extracted
return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
            _UpperCamelCase : Optional[int] = req.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[Any] = requests.get(lowercase_ )
_UpperCamelCase : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ ,"rb" ) as stream:
_UpperCamelCase : Union[str, Any] = pkl.load(lowercase_ )
_UpperCamelCase : Union[str, Any] = weights.pop("model" )
_UpperCamelCase : Optional[int] = {}
for k, v in model.items():
_UpperCamelCase : str = torch.from_numpy(lowercase_ )
if "running_var" in k:
_UpperCamelCase : List[Any] = torch.tensor([0] )
_UpperCamelCase : str = k.replace("running_var" ,"num_batches_tracked" )
_UpperCamelCase : Any = zero
return new
def lowercase__ ( ) -> Dict:
"""simple docstring"""
    print(F'''{os.path.abspath(os.path.join(PATH ,os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
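# Chunking sketch for the generator above: it yields successive slices of length
# `batch`, with a shorter final slice when the input does not divide evenly, e.g.
#
#     list(lowercase__([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]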
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, float]:
"""simple docstring"""
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(a__ ,2 ) - pow(a__ ,2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(a__ ,2 ) - pow(a__ ,2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(a__ ,2 ) + pow(a__ ,2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super(__a , self ).__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
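# Shape walk-through for the scoring above (illustrative): with query embeddings
# q[i] of shape (L, H) and the start-marker embeddings s_start of shape (N, H),
# torch.matmul(q[i], s_start.T) is (L, N); .sum(1) collapses the N support markers
# to one score per query token, and softmax(0) normalizes those L scores into
# start-position probabilities (and likewise for the end markers).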
| 310
| 0
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : int = 16 , __a : int = 88 , __a : Optional[int] = None , __a : int = 1 , __a : float = 0.0 , __a : int = 32 , __a : Optional[int] = None , __a : bool = False , __a : Optional[int] = None , __a : Optional[int] = None , __a : str = "geglu" , __a : Optional[int] = None , ) -> str:
super().__init__()
_UpperCamelCase : int = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__a , attention_head_dim=__a , in_channels=__a , num_layers=__a , dropout=__a , norm_num_groups=__a , cross_attention_dim=__a , attention_bias=__a , sample_size=__a , num_vector_embeds=__a , activation_fn=__a , num_embeds_ada_norm=__a , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCamelCase : str = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCamelCase : Any = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCamelCase : List[str] = [1, 0]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] , __a : Optional[Any] , __a : Union[str, Any]=None , __a : Dict=None , __a : List[Any]=None , __a : bool = True , ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = hidden_states
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCamelCase : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCamelCase : int = self.transformer_index_for_condition[i]
_UpperCamelCase : List[Any] = self.transformers[transformer_index](
__a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , return_dict=__a , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCamelCase : List[str] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCamelCase : Any = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__a )
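# Mixing sketch: each transformer contributes its residual (encoded_state - input),
# the two residuals are blended by `mix_ratio`, and the input is added back. With
# the default mix_ratio = 0.5 this reduces to
#     output = 0.5 * (e0 - x) + 0.5 * (e1 - x) + x = 0.5 * (e0 + e1)
# i.e. a plain average of the two transformers' outputs.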
| 370
|
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
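# Worked example: in [2, 2, 3, 3, 4] both 2 and 3 occur twice, which is the maximum
# count, so the modes are [2, 3]; in [1, 1, 2] the single mode is [1].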
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = "altclip_text_model"
def __init__( self : int , __a : Union[str, Any]=25_0002 , __a : Optional[Any]=1024 , __a : Optional[Any]=24 , __a : Union[str, Any]=16 , __a : Optional[int]=4096 , __a : Optional[int]="gelu" , __a : List[Any]=0.1 , __a : int=0.1 , __a : str=514 , __a : Dict=1 , __a : Tuple=0.02 , __a : str=0.02 , __a : Tuple=1e-0_5 , __a : Union[str, Any]=1 , __a : Dict=0 , __a : List[str]=2 , __a : List[Any]="absolute" , __a : Tuple=True , __a : Tuple=768 , **__a : Optional[Any] , ) -> List[str]:
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : Optional[Any] = initializer_factor
_UpperCamelCase : Optional[int] = layer_norm_eps
_UpperCamelCase : int = position_embedding_type
_UpperCamelCase : List[Any] = use_cache
_UpperCamelCase : Optional[int] = project_dim
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = "altclip_vision_model"
def __init__( self : Dict , __a : Optional[int]=768 , __a : Union[str, Any]=3072 , __a : int=512 , __a : Any=12 , __a : Tuple=12 , __a : Union[str, Any]=3 , __a : Dict=224 , __a : Any=32 , __a : List[str]="quick_gelu" , __a : List[Any]=1e-5 , __a : Any=0.0 , __a : Union[str, Any]=0.02 , __a : List[Any]=1.0 , **__a : Dict , ) -> Tuple:
        super().__init__(**__a )
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : List[Any] = intermediate_size
_UpperCamelCase : Tuple = projection_dim
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Union[str, Any] = patch_size
_UpperCamelCase : List[Any] = image_size
_UpperCamelCase : List[Any] = initializer_range
_UpperCamelCase : List[Any] = initializer_factor
_UpperCamelCase : Tuple = attention_dropout
_UpperCamelCase : Optional[Any] = layer_norm_eps
_UpperCamelCase : Tuple = hidden_act
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[Any] , __a : List[Any] , **__a : Union[str, Any] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(__a )
        _UpperCamelCase : str = cls.get_config_dict(__a , **__a )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type" ) == "altclip":
            _UpperCamelCase : Optional[Any] = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__a , **__a )
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = "altclip"
SCREAMING_SNAKE_CASE__ :List[Any] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : Tuple=None , __a : Any=768 , __a : Dict=2.65_92 , **__a : Tuple ) -> Optional[Any]:
_UpperCamelCase : int = kwargs.pop("text_config_dict" , _SCREAMING_SNAKE_CASE )
_UpperCamelCase : Tuple = kwargs.pop("vision_config_dict" , _SCREAMING_SNAKE_CASE )
super().__init__(**_SCREAMING_SNAKE_CASE )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_UpperCamelCase : List[Any] = {}
# This is the complete result when using `text_config_dict`.
            _UpperCamelCase : List[str] = AltCLIPTextConfig(**__a ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_UpperCamelCase : Tuple = (
F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
F'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_UpperCamelCase : List[str] = (
F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
F'''value `text_config["{key}"]` will be overriden.'''
)
                    logger.warning(__a )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_UpperCamelCase : int = {}
# This is the complete result when using `vision_config_dict`.
            _UpperCamelCase : List[Any] = AltCLIPVisionConfig(**__a ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _UpperCamelCase : Union[str, Any] = {
                    str(__a ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_UpperCamelCase : List[str] = (
F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
F'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_UpperCamelCase : Tuple = (
F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
F'''The value `vision_config["{key}"]` will be overriden.'''
)
                    logger.warning(__a )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_UpperCamelCase : List[str] = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
_UpperCamelCase : Tuple = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
        _UpperCamelCase : str = AltCLIPTextConfig(**__a )
        _UpperCamelCase : List[str] = AltCLIPVisionConfig(**__a )
_UpperCamelCase : Any = projection_dim
_UpperCamelCase : Any = logit_scale_init_value
_UpperCamelCase : List[Any] = 1.0
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : Tuple , __a : Union[str, Any] , **__a : Tuple ) -> Optional[int]:
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
_UpperCamelCase : Tuple = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.text_config.to_dict()
_UpperCamelCase : Union[str, Any] = self.vision_config.to_dict()
_UpperCamelCase : List[Any] = self.__class__.model_type
return output
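# Composition sketch (upstream names; the classmethod above is exposed as
# `from_text_vision_configs` and the sub-configs are the two classes defined above):
#
#     config = AltCLIPConfig.from_text_vision_configs(
#         AltCLIPTextConfig(), AltCLIPVisionConfig()
#     )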
| 371
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowerCamelCase__ )
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
| 310
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "mctct"
def __init__( self : Union[str, Any] , __a : str=8065 , __a : Tuple=1536 , __a : Union[str, Any]=36 , __a : str=6144 , __a : Union[str, Any]=4 , __a : Union[str, Any]=384 , __a : Union[str, Any]=920 , __a : List[Any]=1e-5 , __a : Optional[Any]=0.3 , __a : int="relu" , __a : Optional[Any]=0.02 , __a : Any=0.3 , __a : str=0.3 , __a : str=1 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : int=0.3 , __a : Optional[int]=1 , __a : Union[str, Any]=(7,) , __a : Dict=(3,) , __a : int=80 , __a : List[Any]=1 , __a : Union[str, Any]=None , __a : Tuple="sum" , __a : Any=False , **__a : Optional[Any] , ) -> List[Any]:
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : Any = attention_head_dim
_UpperCamelCase : List[Any] = max_position_embeddings
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Tuple = layerdrop
_UpperCamelCase : Union[str, Any] = hidden_act
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : Optional[int] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : int = bos_token_id
_UpperCamelCase : Union[str, Any] = eos_token_id
_UpperCamelCase : int = conv_glu_dim
_UpperCamelCase : Optional[int] = conv_dropout
_UpperCamelCase : List[Any] = num_conv_layers
_UpperCamelCase : List[str] = input_feat_per_channel
_UpperCamelCase : List[str] = input_channels
_UpperCamelCase : Dict = conv_channels
_UpperCamelCase : Union[str, Any] = ctc_loss_reduction
_UpperCamelCase : Union[str, Any] = ctc_zero_infinity
# prevents config testing fail with exporting to json
_UpperCamelCase : Optional[int] = list(__a )
_UpperCamelCase : List[Any] = list(__a )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
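# Illustrative check of the invariant enforced above (defaults assumed from the
# signature): conv_kernel=(7,) with num_conv_layers=1 passes, while e.g.
# conv_kernel=(7, 3) with num_conv_layers=1 would raise the ValueError.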
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
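        # e.g. with the defaults image_size=30 and patch_size=2: (30 // 2) ** 2 = 225 patches, so seq_length = 226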
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
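        # With size=480 and patch_size=8 below, interpolation yields (480 // 8) ** 2 + 1 = 3601 tokens,
        # matching the expected last_hidden_state shape asserted further down.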
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
"""simple docstring"""
import os
lowerCamelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Any = 0
while index < len(_SCREAMING_SNAKE_CASE ) - 1:
_UpperCamelCase : Optional[int] = SYMBOLS[numerals[index]]
_UpperCamelCase : str = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : Optional[Any] = num // 1_000
numerals += m_count * "M"
num %= 1_000
_UpperCamelCase : List[str] = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCamelCase : Optional[int] = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def lowercase__ ( lowercase_ = "/p089_roman.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : Tuple = 0
with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + roman_numerals_filename ) as filea:
_UpperCamelCase : int = filea.readlines()
for line in lines:
_UpperCamelCase : Union[str, Any] = line.strip()
_UpperCamelCase : Tuple = parse_roman_numerals(_SCREAMING_SNAKE_CASE )
_UpperCamelCase : List[str] = generate_roman_numerals(_SCREAMING_SNAKE_CASE )
savings += len(_SCREAMING_SNAKE_CASE ) - len(_SCREAMING_SNAKE_CASE )
return savings
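# Quick sanity check of the round trip the solver relies on: "IIII" (4 chars)
# parses to 4, which re-encodes minimally as "IV" (2 chars), a saving of 2.
assert parse_roman_numerals("IIII") == 4
assert generate_roman_numerals(4) == "IV"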
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
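# Taken together, these tests exercise the intended real-world pattern for
# TextIteratorStreamer: model.generate(...) runs on a worker Thread while the
# main thread iterates the streamer and receives decoded text incrementally.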
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = None
SCREAMING_SNAKE_CASE__ :Optional[int] = None
SCREAMING_SNAKE_CASE__ :List[Any] = None # sigma(t_i)
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Tuple:
return cls()
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = 42
SCREAMING_SNAKE_CASE__ :str = 42
SCREAMING_SNAKE_CASE__ :List[str] = 42
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return True
@register_to_config
def __init__( self : Optional[int] , __a : Union[str, Any] = 0.02 , __a : str = 100 , __a : int = 1.0_07 , __a : int = 80 , __a : Optional[int] = 0.05 , __a : Optional[Any] = 50 , ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
return KarrasVeSchedulerState.create()
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[int] , __a : Any , __a : int = () ) -> KarrasVeSchedulerState:
_UpperCamelCase : Tuple = jnp.arange(0 , __a )[::-1].copy()
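        # Log-linear (geometric) interpolation of the noise variance between
        # sigma_max**2 and sigma_min**2 over the reversed timestep indices.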
_UpperCamelCase : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__a , schedule=jnp.array(__a , dtype=jnp.floataa ) , timesteps=__a , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Tuple , __a : Dict , __a : Any , __a : List[str] , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_UpperCamelCase : Optional[Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
_UpperCamelCase : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
_UpperCamelCase : Any = random.split(__a , num=1 )
_UpperCamelCase : Optional[Any] = self.config.s_noise * random.normal(key=__a , shape=sample.shape )
_UpperCamelCase : str = sigma + gamma * sigma
_UpperCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Any , __a : List[str] , __a : Optional[int] , __a : List[str] , __a : List[Any] , __a : List[str] = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCamelCase : Dict = sample_hat + sigma_hat * model_output
_UpperCamelCase : int = (sample_hat - pred_original_sample) / sigma_hat
_UpperCamelCase : Any = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__a , derivative=__a , state=__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[str] , __a : List[str] , __a : int , __a : Optional[Any] , __a : Union[str, Any] , __a : int , __a : List[str] , __a : Dict = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCamelCase : Union[str, Any] = sample_prev + sigma_prev * model_output
_UpperCamelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
_UpperCamelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__a , derivative=__a , state=__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Tuple , __a : int , __a : int , __a : Dict ) -> Any:
raise NotImplementedError()
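# Hedged sketch of the sampling loop these methods implement, using the upstream
# FlaxKarrasVeScheduler method names (the obfuscated signatures above hide the
# exact keywords, so treat this as illustrative pseudocode):
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
#   for sigma, sigma_prev in zip(state.schedule[:-1], state.schedule[1:]):
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = model(sample_hat, sigma_hat)
#       sample, derivative, state = scheduler.step(
#           state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False)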
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
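    # the word-embedding matrix now has two extra rows, for <ent> and <ent2>,
    # matching the `config.vocab_size += 2` above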
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def load_original_entity_vocab(entity_vocab_path):
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F'''{language}:{entity_name}'''] = entity_id
    return new_mapping
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
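# Keys whose mapped value contains "*" are per-layer templates: the layer index
# parsed from the fairseq parameter name is substituted for "*" while loading.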
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
"""simple docstring"""
if config_path is not None:
_UpperCamelCase : Any = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase_ ,hidden_act="swish" )
else:
_UpperCamelCase : List[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_UpperCamelCase : Optional[Any] = "rotary"
if is_finetuned:
if dict_path:
_UpperCamelCase : List[str] = Dictionary.load(UpperCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[Any] = target_dict.pad_index
_UpperCamelCase : int = target_dict.bos_index
_UpperCamelCase : Optional[int] = target_dict.eos_index
_UpperCamelCase : Union[str, Any] = len(target_dict.symbols )
_UpperCamelCase : List[str] = os.path.join(UpperCAmelCase_ ,"vocab.json" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase_ ) )
return
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_UpperCamelCase : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCamelCase : Dict = 0
_UpperCamelCase : Dict = 1
with open(UpperCAmelCase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
UpperCAmelCase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=UpperCAmelCase_ ,)
_UpperCamelCase : Dict = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,)
_UpperCamelCase : List[Any] = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ ,tokenizer=UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
_UpperCamelCase : str = WavaVecaConformerForCTC(UpperCAmelCase_ )
else:
_UpperCamelCase : Optional[Any] = WavaVecaConformerForPreTraining(UpperCAmelCase_ )
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase : int = argparse.Namespace(task="audio_pretraining" )
_UpperCamelCase : Dict = fairseq.tasks.setup_task(UpperCAmelCase_ )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=UpperCAmelCase_ )
_UpperCamelCase : Dict = model[0].eval()
recursively_load_weights(UpperCAmelCase_ ,UpperCAmelCase_ ,not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
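if __name__ == "__main__":
    # Minimal sanity check using nltk directly, mirroring the compute method
    # above (values are illustrative): an exact hypothesis/reference match
    # yields GLEU 1.0, since n-gram precision and recall are both 1.
    print(gleu_score.corpus_gleu(
        list_of_references=[[["the", "cat", "sat"]]], hypotheses=[["the", "cat", "sat"]], min_len=1, max_len=4))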
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
"""simple docstring"""
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
"""simple docstring"""
    k_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
"""simple docstring"""
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = traverse_util.flatten_dict(variables["target"] )
_UpperCamelCase : str = {"/".join(_lowerCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : List[Any] = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" ,_lowerCAmelCase )
_UpperCamelCase : Dict = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : Union[str, Any] = old["token_embedder/embedding"]
# Encoder.
for i in range(_lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[Any] = tax_layer_norm_lookup(_lowerCAmelCase ,_lowerCAmelCase ,"encoder" ,"pre_attention_layer_norm" )
_UpperCamelCase : int = tax_attention_lookup(_lowerCAmelCase ,_lowerCAmelCase ,"encoder" ,"attention" )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : Tuple = o.T
_UpperCamelCase : List[str] = q.T
_UpperCamelCase : Any = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : int = tax_layer_norm_lookup(_lowerCAmelCase ,_lowerCAmelCase ,"encoder" ,"pre_mlp_layer_norm" )
_UpperCamelCase : Tuple = tax_mlp_lookup(_lowerCAmelCase ,_lowerCAmelCase ,"encoder" ,_lowerCAmelCase )
_UpperCamelCase : Optional[Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Union[str, Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : Tuple = wi.T
_UpperCamelCase : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Any = tax_relpos_bias_lookup(
_lowerCAmelCase ,_lowerCAmelCase ,"encoder" ).T
_UpperCamelCase : Any = old["encoder/encoder_norm/scale"]
if not scalable_attention:
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
_lowerCAmelCase ,0 ,"encoder" ).T
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
_lowerCAmelCase ,0 ,"decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(_lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : List[str] = tax_layer_norm_lookup(_lowerCAmelCase ,_lowerCAmelCase ,"decoder" ,"pre_self_attention_layer_norm" )
_UpperCamelCase : Union[str, Any] = tax_attention_lookup(_lowerCAmelCase ,_lowerCAmelCase ,"decoder" ,"self_attention" )
_UpperCamelCase : Any = layer_norm
_UpperCamelCase : Optional[Any] = k.T
_UpperCamelCase : Any = o.T
_UpperCamelCase : Union[str, Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,"decoder" ,"pre_cross_attention_layer_norm" )
            k, o, q, v = tax_attention_lookup(old ,i ,"decoder" ,"encoder_decoder_attention" )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : List[Any] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : int = q.T
_UpperCamelCase : Optional[int] = v.T
# Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old ,i ,"decoder" ,"pre_mlp_layer_norm" )
            wi, wo = tax_mlp_lookup(old ,i ,"decoder" ,split_mlp_wi )
_UpperCamelCase : str = layer_norm
if split_mlp_wi:
_UpperCamelCase : List[str] = wi[0].T
_UpperCamelCase : int = wi[1].T
else:
_UpperCamelCase : str = wi.T
_UpperCamelCase : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
                _UpperCamelCase : Optional[int] = tax_relpos_bias_lookup(old ,i ,"decoder" ).T
_UpperCamelCase : Optional[Any] = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict( converted_params ,is_encoder_only ) -> Any:
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta( model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention ) -> Tuple:
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables ,num_layers=config.num_layers ,is_encoder_only=is_encoder_only ,scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted ,is_encoder_only )
    model.load_state_dict(state_dict ,strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path ,config_file ,pytorch_dump_path ,is_encoder_only = False ,scalable_attention = False ,) -> int:
    """simple docstring"""
    config = MTaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
lowerCamelCase__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
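# A hypothetical invocation of the converter above (sketch only: the paths are
# placeholders, not taken from the original source):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump \
#       --scalable_attention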
| 354
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase__ ( inductance ,frequency ,reactance ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
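# Worked example (a sketch): the function solves X_L = 2 * pi * f * L for the
# argument that is passed as 0.
#
#   >>> lowercase__(inductance=0, frequency=10_000, reactance=50)
#   {'inductance': 0.000795775...}   # 50 / (2 * pi * 10_000)
#   >>> lowercase__(inductance=0.000796, frequency=10_000, reactance=0)
#   {'reactance': 50.0141...}        # 2 * pi * 10_000 * 0.000796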
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
'''simple docstring'''
    def __init__( self : Optional[Any] , value : Optional[Any] ) -> List[Any]:
        self.value = value
        self.left = None
        self.right = None
class BinaryTree:
'''simple docstring'''
    def __init__( self : str , tree : int ) -> Union[str, Any]:
        self.tree = tree
    def depth_first_search( self : List[Any] , node : Dict ) -> Optional[Any]:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Optional[int] ) -> Union[str, Any]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
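# Usage sketch for the classes above: the iterator yields the sum of all node
# values in the tree.
#
#   root = Node(10)
#   root.left = Node(5)
#   root.right = Node(-3)
#   tree = BinaryTree(root)
#   next(iter(tree))  # 12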
| 355
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( dataset_path ) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem( fs ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( fs ,src ,dst ) -> Optional[Any]:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) ,fs._strip_protocol(dst ) )
    else:
        fs.mv(src ,dst ,recursive=True )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
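# Intended behaviour of the helpers above, as a sketch (the first helper strips
# a protocol prefix from a URI; the second reports whether a filesystem is
# non-local):
#
#   lowercase__("s3://my-bucket/my-dataset")            # -> "my-bucket/my-dataset"
#   import fsspec
#   is_remote_filesystem(fsspec.filesystem("file"))     # -> False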
| 310
| 0
|
"""simple docstring"""
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
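# The generated checkpoint can then be reloaded like any other model (sketch):
#
#   model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
#   tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")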
| 356
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config( checkpoint_url ) -> Dict:
    """simple docstring"""
    config = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_UpperCamelCase : Union[str, Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : Tuple = 48
_UpperCamelCase : Any = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_UpperCamelCase : Optional[Any] = [6, 6, 6, 6]
_UpperCamelCase : Any = 60
_UpperCamelCase : Optional[int] = [6, 6, 6, 6]
_UpperCamelCase : List[str] = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_UpperCamelCase : Any = 4
_UpperCamelCase : int = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_UpperCamelCase : Any = 1
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : Optional[int] = 126
_UpperCamelCase : int = 7
_UpperCamelCase : Dict = 255.0
_UpperCamelCase : str = ""
return config
def rename_key( name ,config ) -> List[str]:
    """simple docstring"""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" ,"embeddings.patch_embeddings.layernorm" )
    if "layers" in name:
        name = name.replace("layers" ,"encoder.stages" )
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks" ,"layers" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" ,"attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" ,"attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" ,"layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" ,"layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" ,"intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" ,"output.dense" )
    if "q_bias" in name:
        name = name.replace("q_bias" ,"query.bias" )
    if "k_bias" in name:
        name = name.replace("k_bias" ,"key.bias" )
    if "v_bias" in name:
        name = name.replace("v_bias" ,"value.bias" )
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp" ,"continuous_position_bias_mlp" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" ,"patch_embed.projection" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first" ,"first_convolution" )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last" ,"final_convolution" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0" ,"conv_before_upsample" )
            if "upsample.0" in name:
                name = name.replace("upsample.0" ,"upsample.convolution_0" )
            if "upsample.2" in name:
                name = name.replace("upsample.2" ,"upsample.convolution_1" )
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight" ,"upsample.conv.weight" )
            name = name.replace("upsample.0.bias" ,"upsample.conv.bias" )
        else:
            pass
    else:
        name = "swin2sr." + name
    return name
def convert_state_dict( orig_state_dict ,config ) -> Dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
if "weight" in key:
_UpperCamelCase : List[str] = val[:dim, :]
_UpperCamelCase : Tuple = val[dim : dim * 2, :]
_UpperCamelCase : str = val[-dim:, :]
else:
_UpperCamelCase : Any = val[:dim]
_UpperCamelCase : Union[str, Any] = val[dim : dim * 2]
_UpperCamelCase : Tuple = val[-dim:]
pass
        else:
            orig_state_dict[rename_key(key ,config )] = val
return orig_state_dict
def convert_swinasr_checkpoint( checkpoint_url ,pytorch_dump_folder_path ,push_to_hub ) -> Tuple:
    """simple docstring"""
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="cpu" )
    new_state_dict = convert_state_dict(state_dict ,config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict ,strict=False )
    if len(missing_keys ) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'''Unexpected key {key} in state_dict''' )
# verify values
_UpperCamelCase : Optional[int] = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
_UpperCamelCase : int = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw ).convert("RGB" )
_UpperCamelCase : List[Any] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_UpperCamelCase : List[Any] = 126 if "Jpeg" in checkpoint_url else 256
_UpperCamelCase : List[Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_024, 1_024] )
        expected_slice = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1_024, 1_024] )
        expected_slice = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_024, 1_024] )
        expected_slice = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,expected_slice ,atol=1e-3 )
print("Looks ok!" )
    url_to_name = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(F'''caidas/{model_name}''' )
        processor.push_to_hub(F'''caidas/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
lowerCamelCase__ = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
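# Hypothetical invocation (the checkpoint URL is the argparse default above;
# the output directory is a placeholder):
#
#   python convert_swin2sr_checkpoint.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub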
| 357
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310
| 0
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    '''simple docstring'''
    def put( self : Optional[Any] , value : Any ) -> Any:
raise NotImplementedError()
    def end( self : int ) -> Tuple:
raise NotImplementedError()
class TextStreamer( BaseStreamer ):
    '''simple docstring'''
    def __init__( self : List[str] , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , **decode_kwargs : Any ) -> List[str]:
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put( self : Optional[int] , value : Optional[int] ) -> Union[str, Any]:
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end( self : Dict ) -> List[Any]:
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text( self : Optional[int] , text : str , stream_end : bool = False ) -> Union[str, Any]:
        print(text , flush=True , end="" if not stream_end else None )
    def _is_chinese_char( self : str , cp : List[str] ) -> List[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
class TextIteratorStreamer( TextStreamer ):
    '''simple docstring'''
    def __init__( self : Any , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , timeout : Optional[float] = None , **decode_kwargs : Optional[Any] ) -> Any:
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text( self : List[str] , text : str , stream_end : bool = False ) -> int:
        self.text_queue.put(text , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Any ) -> Optional[Any]:
return self
    def __next__( self : List[Any] ) -> Optional[int]:
        value = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
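# Usage sketch for the queue-backed streamer above (model, tokenizer and inputs
# are assumed to be an already-loaded generation model, its tokenizer and a
# tokenized prompt):
#
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer))
#   thread.start()
#   for new_text in streamer:          # consume chunks as they are produced
#       print(new_text, end="", flush=True)
#   thread.join()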
| 358
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "xlm-roberta-xl"
    def __init__( self : Any , vocab_size=25_0880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_0240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-0_5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> str:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig( OnnxConfig ):
    '''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
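# Sketch: instantiating the config with a smaller geometry and inspecting the
# ONNX axes mapping (kwargs beyond the defaults are illustrative):
#
#   config = XLMRobertaXLConfig(hidden_size=1280, num_hidden_layers=12)
#   onnx_config = XLMRobertaXLOnnxConfig(config, task="default")
#   onnx_config.inputs  # OrderedDict with batch/sequence dynamic axes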
| 310
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 2
@register_to_config
    def __init__( self : List[str] , sigma_min : float = 0.02 , sigma_max : float = 100 , s_noise : float = 1.0_07 , s_churn : float = 80 , s_min : float = 0.05 , s_max : float = 50 , ) -> Optional[int]:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
    def scale_model_input( self : Any , sample : torch.FloatTensor , timestep : Optional[int] = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self : Union[str, Any] , num_inference_steps : int , device : Union[str, torch.device] = None ) -> List[Any]:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
    def add_noise_to_input( self : List[Any] , sample : torch.FloatTensor , sigma : float , generator : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self : Optional[Any] , model_output : torch.FloatTensor , sigma_hat : float , sigma_prev : float , sample_hat : torch.FloatTensor , return_dict : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def step_correct( self : Dict , model_output : torch.FloatTensor , sigma_hat : float , sigma_prev : float , sample_hat : torch.FloatTensor , sample_prev : torch.FloatTensor , derivative : torch.FloatTensor , return_dict : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def add_noise( self : Any , original_samples : List[Any] , noise : int , timesteps : int ) -> int:
        raise NotImplementedError()
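# The schedule built in set_timesteps interpolates geometrically between
# sigma_max^2 and sigma_min^2. A sketch of one denoising step (sample and
# model_output are placeholder tensors supplied by the caller):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, scheduler.schedule[0])
#   out = scheduler.step(model_output, sigma_hat, scheduler.schedule[1], sample_hat)
#   sample = out.prev_sample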
| 359
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : int , **__a : int ) -> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self : Any , model : Union[str, Any] , tokenizer : Optional[int] , processor : str ) -> Optional[Any]:
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test( self : List[str] , object_detector : List[Any] , examples : Union[str, Any] ) -> int:
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
        self.assertGreater(len(outputs ) , 0 )
for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                } , )
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        batch = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
        batch_outputs = object_detector(batch , threshold=0.0 )
        self.assertEqual(len(batch_outputs ) , len(batch ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        "score": ANY(float ),
                        "label": ANY(str ),
                        "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                    } , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[str] = "hf-internal-testing/tiny-detr-mobilenetsv3"
_UpperCamelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : str = "facebook/detr-resnet-50"
_UpperCamelCase : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained(__a )
_UpperCamelCase : Union[str, Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
_UpperCamelCase : Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Dict = "facebook/detr-resnet-50"
_UpperCamelCase : Optional[Any] = pipeline("object-detection" , model=__a )
_UpperCamelCase : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
        threshold = 0.99_85
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection" , model=model_id )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=threshold )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = "Narsil/layoutlmv3-finetuned-funsd"
_UpperCamelCase : int = 0.99_93
_UpperCamelCase : str = pipeline("object-detection" , model=__a , threshold=__a )
_UpperCamelCase : Union[str, Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
| 310
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ["input_features"]
    def __init__( self : Union[str, Any] , feature_size=80 , sampling_rate=1_6000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ) -> Dict:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , )
    def _np_extract_fbank_features( self : Dict , waveform : Union[str, Any] ) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __call__( self : List[Any] , raw_speech : List[str] , truncation : Any = True , pad_to_multiple_of : Union[str, Any] = None , return_tensors : Union[str, Any] = None , return_attention_mask : List[Any] = None , padding : List[str] = "max_length" , max_length : int = None , sampling_rate : Tuple = None , do_normalize : List[str] = None , **kwargs : Dict , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({"input_features": raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , List ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self : Optional[int] ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
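# Usage sketch: featurizing one second of silence at 16 kHz. With the defaults
# above, audio is padded to 30 s and mapped to 80 mel bins x 3000 frames:
#
#   import numpy as np
#   fe = WhisperFeatureExtractor()
#   audio = np.zeros(16_000, dtype=np.float32)          # 1 s at 16 kHz
#   feats = fe(audio, sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape                       # (1, 80, 3000)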
| 360
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def extract_user_profile( script ) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
'''simple docstring'''
    def __init__( self : Dict , username : str ) -> Tuple:
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self : Tuple ) -> dict:
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data ,dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCamelCase__ = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments( BenchmarkArguments ):
'''simple docstring'''
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__( self : List[str] , **kwargs : Tuple ) -> Tuple:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("tpu_name" , self.tpu_name )
        self.device_idx = kwargs.pop("device_idx" , self.device_idx )
        self.eager_mode = kwargs.pop("eager_mode" , self.eager_mode )
        self.use_xla = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name : str = field(
        default=None , metadata={"help": "Name of TPU"} , )
    device_idx : int = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    eager_mode : bool = field(default=False , metadata={"help": "Benchmark models in eager mode."} )
    use_xla : bool = field(
        default=False , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )
@cached_property
    def _setup_tpu( self : int ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self , ["tf"] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self : Any ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , "GPU" )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy
@property
    def is_tpu( self : Tuple ) -> bool:
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
    def strategy( self : Tuple ) -> "tf.distribute.Strategy":
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
    def gpu_list( self : Union[str, Any] ) -> List[str]:
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
    def n_gpu( self : Any ) -> int:
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self : Optional[Any] ) -> bool:
        return self.n_gpu > 0
| 361
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
def make_highpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
def make_bandpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def make_allpass( frequency ,samplerate ,q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] ,[b0, b1, b2] )
    return filt
def make_peak( frequency ,samplerate ,gain_db ,q_factor = 1 / sqrt(2 ) ,) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tau * frequency / samplerate
_UpperCamelCase : Any = sin(lowercase_ )
_UpperCamelCase : Union[str, Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40)
_UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : Any = big_a * (pmc + aaa)
_UpperCamelCase : Dict = 2 * big_a * mpc
_UpperCamelCase : str = big_a * (pmc - aaa)
_UpperCamelCase : Dict = ppmc + aaa
_UpperCamelCase : List[Any] = -2 * pmpc
_UpperCamelCase : Dict = ppmc - aaa
_UpperCamelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : Any = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : str = 10 ** (gain_db / 40)
_UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : List[Any] = big_a * (ppmc + aaa)
_UpperCamelCase : Dict = -2 * big_a * pmpc
_UpperCamelCase : Dict = big_a * (ppmc - aaa)
_UpperCamelCase : Optional[Any] = pmc + aaa
_UpperCamelCase : Any = 2 * mpc
_UpperCamelCase : Any = pmc - aaa
_UpperCamelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
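

if __name__ == "__main__":
    # Usage sketch (an addition, not in the original module). It assumes the
    # IIRFilter from audio_filters.iir_filter exposes a per-sample process()
    # method; the 1 kHz cutoff and 48 kHz sample rate are arbitrary examples.
    lowpass = make_lowpass(1_000, 48_000)
    impulse = [1.0] + [0.0] * 15
    impulse_response = [lowpass.process(sample) for sample in impulse]
    print(impulse_response)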
| 310
| 0
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCamelCase__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    '''simple docstring'''

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")

                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
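

# Standalone sketch of the retry loop in _generate_tables above (an illustrative
# addition, not part of the loader): pyarrow parses JSON in `block_size` chunks
# and raises ArrowInvalid when a record straddles a chunk boundary, so the block
# size is doubled until parsing succeeds or exceeds the payload size.
def _read_json_bytes_sketch(payload: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(payload), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(payload):
                raise
            block_size *= 2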
| 362
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """simple docstring"""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """simple docstring"""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
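

# Illustrative helper (an addition, not part of the original script): how the "*"
# wildcard in MAPPING expands. For a fairseq name like "encoder.layers.3.fc1.weight"
# matching key "fc1", the layer index "3" is recovered from the name and substituted.
def _expand_mapped_key(name: str, key: str, mapped_key: str) -> str:
    if "*" in mapped_key:
        layer_index = name.split(key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key


# e.g. _expand_mapped_key("encoder.layers.3.fc1.weight", "fc1",
#                         "encoder.layers.*.feed_forward.intermediate_dense")
# -> "encoder.layers.3.feed_forward.intermediate_dense"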
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCamelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 310
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
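
    # Usage sketch outside the test harness (an addition; assumes a CUDA device and
    # the same community k-diffusion pipeline; scheduler and step count are examples):
    #
    #   pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    #   pipe.set_scheduler("sample_euler")
    #   image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]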
| 363
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """simple docstring"""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
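
    # Spot checks (an addition for illustration; values follow from the definition):
    assert mobius(4) == 0  # 4 = 2 * 2 is not square-free
    assert mobius(10) == 1  # 10 = 2 * 5: an even number of distinct primes
    assert mobius(30) == -1  # 30 = 2 * 3 * 5: an odd number of distinct primes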
| 310
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
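
# Sketch of the idea behind _LazyModule (an illustrative simplification, not the
# real implementation): the module replaces itself in sys.modules with an object
# that imports a submodule only when one of its attributes is first accessed.
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, name):
#           module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           return getattr(module, name)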
| 364
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
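
    # Note (an addition for illustration): with the toy vocab/merges written in
    # setUp above, "lower newer" tokenizes to ["\u0120low", "er", "\u0120", "n",
    # "e", "w", "er"] because only the four listed merges are available and
    # everything else falls back to single characters.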
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
'''simple docstring'''
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
| 310
| 0
|
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
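
    # Example (an addition for illustration): prefix sums of [1, 2, 3, 4] are
    # [1, 3, 6, 10], so range sums and subarray-sum queries are O(1).
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(0, 3) == 10
    assert ps.get_sum(1, 2) == 5
    assert ps.contains_sum(6)  # the subarray [1, 2, 3]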
| 365
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    '''simple docstring'''

    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """simple docstring"""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """simple docstring"""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
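
    # Worked example (an addition for illustration): a strictly diagonally
    # dominant 3x3 system; three iterations from the given initial values.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))
    # -> [0.909375, -1.14375, -0.7484375]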
| 366
|
"""simple docstring"""
lowerCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """simple docstring"""
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """simple docstring"""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
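    # For the graph above (a CLRS-style example with max flow 23), the saturated
    # edges that form the minimum cut are expected to be [(1, 3), (4, 3), (4, 5)].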
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''

    destination_vertex: int
    weight: int


class AdjacencyList:
    '''simple docstring'''

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
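
    # Example (an addition for illustration): the direct edge 0 -> 1 has weight 1,
    # but the zero-weight chain 0 -> 2 -> 3 -> 1 gives distance 0.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 1)
    g.add_edge(0, 2, 0)
    g.add_edge(2, 3, 0)
    g.add_edge(3, 1, 0)
    assert g.get_shortest_path(0, 1) == 0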
| 367
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
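

# Illustration (an addition, not in the original file): make_batched normalizes
# its input to a list of videos, where each video is a list of frames.
#   frame              -> [[frame]]
#   [frame, frame]     -> [[frame, frame]]  (a single video)
#   [[frame], [frame]] -> unchanged         (already batched)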
class VivitImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
        resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None,
        do_rescale: bool = None, rescale_factor: float = None, offset: bool = None,
        do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample,
                    do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale,
                    rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize,
                    image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
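# Illustrative usage sketch (the class and model id here are assumptions -- the
# processor class above is shown without its original name; the offset-aware
# rescale suggests a ViViT-style video processor). A video is passed as a list
# of frames and "pixel_values" stacks the preprocessed frames per video:
#
#   processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
#   inputs = processor.preprocess(video_frames, return_tensors="pt")
#   inputs["pixel_values"]  # (num_videos, num_frames, channels, height, width)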
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
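# Note on the semantics exercised above: `type=` asks TypedSequence for a hard
# cast, so incompatible data raises (TypeError / ArrowInvalid), while `try_type=`
# is best-effort -- on failure the inferred Arrow type is kept, which is why the
# string sequences above come out as pa.string().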
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    # The obfuscated source repeated the same schema twice; the second variant is
    # assumed to be the narrower int32 schema, as in the upstream test suite.
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
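# Minimal round-trip sketch of what the two tests above rely on (buffer names
# are illustrative): ArrowWriter streams record batches into any pyarrow sink,
# and the result can be read back with pa.ipc.open_stream.
#
#   sink = pa.BufferOutputStream()
#   with ArrowWriter(stream=sink) as w:
#       w.write({"col_1": "foo", "col_2": 1})
#       w.finalize()
#   table = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()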
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : int = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase_ ,writer_batch_size=lowerCamelCase_ ,hash_salt="split_name" ,check_duplicates=lowerCamelCase_ ,) as writer:
with pytest.raises(lowerCamelCase_ ):
writer.write({"col_1": "foo", "col_2": 1} ,key=[1, 2] )
_UpperCamelCase : Any = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 10] )
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase_ ,writer_batch_size=lowerCamelCase_ ,hash_salt="split_name" ,check_duplicates=lowerCamelCase_ ,) as writer:
with pytest.raises(lowerCamelCase_ ):
writer.write({"col_1": "foo", "col_2": 1} ,key=10 )
writer.write({"col_1": "bar", "col_2": 2} ,key=10 )
_UpperCamelCase : Dict = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 10] )
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase_ ,writer_batch_size=lowerCamelCase_ ,hash_salt="split_name" ,check_duplicates=lowerCamelCase_ ,) as writer:
writer.write({"col_1": "foo", "col_2": 1} ,key=1 )
writer.write({"col_1": "bar", "col_2": 2} ,key=2 )
_UpperCamelCase : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : str = pa.BufferOutputStream()
_UpperCamelCase : Dict = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ ,schema=lowerCamelCase_ ,writer_batch_size=lowerCamelCase_ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCamelCase : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCamelCase : Tuple = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : int = pa.BufferOutputStream()
_UpperCamelCase : Optional[Any] = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ ,schema=lowerCamelCase_ ,writer_batch_size=lowerCamelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCamelCase : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCamelCase : List[str] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Any = pa.BufferOutputStream()
_UpperCamelCase : List[Any] = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ ,schema=lowerCamelCase_ ,writer_batch_size=lowerCamelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCamelCase : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCamelCase : Any = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
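# `change_first_primitive_element_in_list` recurses into the first sub-list
# until it reaches a primitive, so the same helper works for the flat and the
# nested `sequence` parametrizations used below.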
@pytest.mark.parametrize("optimized_int_type, expected_dtype" ,[(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = pa.array(TypedSequence(lowerCamelCase_ ,optimized_int_type=lowerCamelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" ,[
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] ,)
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = pa.array(OptimizedTypedSequence(lowerCamelCase_ ,col=lowerCamelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCamelCase : Any = copy.deepcopy(lowerCamelCase_ )
_UpperCamelCase : Dict = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCamelCase_ ,lowerCamelCase_ )
_UpperCamelCase : List[Any] = pa.array(OptimizedTypedSequence(lowerCamelCase_ ,col=lowerCamelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
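# The test above documents OptimizedTypedSequence's downcasting contract: known
# tokenizer columns start at a narrow integer type, and any out-of-range value
# promotes the column back to int64 instead of overflowing.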
@pytest.mark.parametrize("raise_exception" ,[False, True] )
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : Tuple = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=lowerCamelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Tuple = 'mock://dataset-train.arrow'
with ArrowWriter(path=lowerCamelCase_ ,storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs ,type(lowerCamelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCamelCase : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCamelCase_ )
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = pa.BufferOutputStream()
with ParquetWriter(stream=lowerCamelCase_ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCamelCase : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCamelCase : List[Any] = pa.BufferReader(output.getvalue() )
_UpperCamelCase : pa.Table = pq.read_table(lowerCamelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" ,[False, True] )
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
import PIL.Image
_UpperCamelCase : Dict = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) ,dtype=np.uinta ) ).save(lowerCamelCase_ ,format="png" )
_UpperCamelCase : List[Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCamelCase_ ,features=Features({"image": Image()} ) ,embed_local_files=lowerCamelCase_ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCamelCase : Optional[int] = pa.BufferReader(output.getvalue() )
_UpperCamelCase : pa.Table = pq.read_table(lowerCamelCase_ )
_UpperCamelCase : List[str] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] ,lowerCamelCase_ )
with open(lowerCamelCase_ ,"rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowercase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = pa.schema([pa.field("col_1" ,pa.string() ,nullable=lowerCamelCase_ )] )
_UpperCamelCase : Dict = pa.BufferOutputStream()
with ArrowWriter(stream=lowerCamelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCamelCase_ )
assert writer._schema == pa.schema([pa.field("col_1" ,pa.string() )] )
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
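# Sketch of the expected label-file format (an assumption inferred from the
# parsing above): one entry per line, with the canonical name in the first
# comma-separated field, e.g.
#
#   person,human being
#   bicycle,bike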
def load_ckp(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # the isinstance check needs the torch.Tensor class, not the torch.tensor factory
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file_content = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n2.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
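# Example URLs produced by hf_bucket_url (derived directly from the branches
# above; "<endpoint>" stands for the CDN or S3 prefix):
#   hf_bucket_url("bert-base-uncased", "config.yaml")
#       -> "<endpoint>/bert-base-uncased-config.yaml"   # legacy flat namespace
#   hf_bucket_url("user/model", "config.yaml")
#       -> "<endpoint>/user/model/config.yaml"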
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None == we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
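# The cache key is therefore sha256(url) plus an optional ".{sha256(etag)}"
# suffix, so a changed ETag (a new upload of the same URL) yields a new cache
# entry while the plain URL hash still matches wildcard lookups like "<hash>.*".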
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def lowercase__ ( lowercase_ ,lowercase_="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
_UpperCamelCase : Tuple = eval(f.read() )
else:
_UpperCamelCase : str = requests.get(lowercase_ )
try:
_UpperCamelCase : Optional[int] = requests.json()
except Exception:
_UpperCamelCase : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCamelCase : List[Any] = eval(lowercase_ )
except Exception:
_UpperCamelCase : int = data.split("\n" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowercase__ ( lowercase_ ,lowercase_="RGB" ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ ,lowercase_ )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[Any] = cva.imread(lowercase_ )
else:
_UpperCamelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, F'''could not connect to: {im}'''
_UpperCamelCase : Optional[int] = cva.cvtColor(lowercase_ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCamelCase : List[Any] = img[:, :, ::-1]
return img
def lowercase__ ( lowercase_ ,lowercase_=1 ) -> List[Any]:
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(lowercase_ ) ,lowercase_ ))
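# Example: chunk([f1, f2, f3], batch=2) lazily yields [f1, f2] then [f3]. Note
# that it returns a generator expression, so it can only be iterated once.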
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
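# Shape sketch for forward() (an informal reading of the code above): q[i]
# holds the token embeddings of query i, s_start/s_end gather support-token
# embeddings at the entity start/end marker positions, and the matmul + softmax
# turn query-token/support-token similarities into per-token start and end
# probabilities for that query.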
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCamelCase__ = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        # We evaluate on the dev set to compare to benchmarks without test-set submission
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
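# Examples (these follow directly from the implementation above):
#   mode([2, 2, 3])    -> [2]
#   mode([1, 1, 2, 2]) -> [1, 2]   # ties are all returned, sorted
#   mode([])           -> []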
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
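# A minimal usage sketch, assuming the upstream `transformers` API (where this
# configuration class corresponds to `RagConfig`): a RAG config is composed from
# a question-encoder config and a generator config. The DPR/BART sub-model
# choices below are illustrative assumptions, not taken from the snippet above.
from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.for_model("dpr")  # dense question encoder
generator_config = AutoConfig.for_model("bart")  # seq2seq generator
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)
assert rag_config.model_type == "rag"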
| 310
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCamelCase__ = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any=32 ) -> int:
set_seed(0 )
        _UpperCamelCase : Optional[int] = UNet2DModel(sample_size=__a , in_channels=3 , out_channels=3 )
_UpperCamelCase : Union[str, Any] = torch.optim.SGD(model.parameters() , lr=0.00_01 )
return model, optimizer
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        _UpperCamelCase : List[Any] = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : int = DDPMScheduler(
            num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=__a , )
_UpperCamelCase : Union[str, Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=__a , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
        _UpperCamelCase : Any = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(__a ) for _ in range(4 )]
        _UpperCamelCase : Optional[int] = [torch.randn((4, 3, 32, 32) ).to(__a ) for _ in range(4 )]
        _UpperCamelCase : List[str] = [torch.randint(0 , 1000 , (4,) ).long().to(__a ) for _ in range(4 )]
# train with a DDPM scheduler
_UpperCamelCase : Dict = self.get_model_optimizer(resolution=32 )
        model.train().to(__a )
for i in range(4 ):
optimizer.zero_grad()
_UpperCamelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _UpperCamelCase : str = model(__a , timesteps[i] ).sample
            _UpperCamelCase : Dict = torch.nn.functional.mse_loss(__a , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase : List[str] = self.get_model_optimizer(resolution=32 )
        model.train().to(__a )
for i in range(4 ):
optimizer.zero_grad()
            _UpperCamelCase : Any = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _UpperCamelCase : int = model(__a , timesteps[i] ).sample
            _UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__a , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
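# Why the DDPM and DDIM runs above are directly comparable: both schedulers
# implement the same forward-diffusion rule in `add_noise`,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so identical seeds, betas, and timesteps yield identical noisy training
# inputs. A minimal sketch of that rule (the function name here is our own):
import torch

def manual_add_noise(clean_sample: torch.Tensor, noise: torch.Tensor, timestep: int, alphas_cumprod: torch.Tensor) -> torch.Tensor:
    sqrt_alpha_prod = alphas_cumprod[timestep] ** 0.5
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timestep]) ** 0.5
    # scalar coefficients broadcast over a (batch, channels, height, width) sample
    return sqrt_alpha_prod * clean_sample + sqrt_one_minus_alpha_prod * noise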
| 350
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
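        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 1 = 226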
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
        _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # which allows interpolating the pre-trained position embeddings so the model
        # can be used at higher resolutions. The DINO model by Facebook AI leverages
        # this to visualize self-attention on higher-resolution images.
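        # For "facebook/dino-vits8" the patch size is 8, so a 480x480 input gives
        # (480 // 8) ** 2 = 3600 patches plus the [CLS] token = 3601 positions,
        # matching the (1, 3601, 384) hidden-state shape asserted below.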
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310
| 0
|