"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "M-CLIP"
def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = transformerDimSize
_SCREAMING_SNAKE_CASE : List[str] = imageDimSize
super().__init__(**_A)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = MCLIPConfig
def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict):
"""simple docstring"""
super().__init__(_A , *_A , **_A)
_SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A)
_SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims)
def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(_A), embs
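# Hedged usage sketch (not part of the original file): the checkpoint id and
# tokenizer below are assumptions for illustration; any M-CLIP checkpoint with
# a matching XLM-R tokenizer would work the same way.
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-L-14")
#   batch = tokenizer(["Una foto de un gato"], padding=True, return_tensors="pt")
#   projected, token_embs = model(batch["input_ids"], batch["attention_mask"])
#   projected.shape  # (1, numDims) -- the CLIP-space text embedding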
"""Calculate the concentration of charge carriers in a semiconductor using the mass action law."""
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given two of the three concentrations (pass the unknown one as 0), return
    a (name, value) tuple for the missing one, using n * p = n_i^2.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
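# Hedged worked example (not in the original file), using the mass action law
# n * p = n_i^2: with an intrinsic concentration of 200 and a hole
# concentration of 1600, the electron concentration follows directly:
#
#   carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
#   # -> ('electron_conc', 25.0)   since 200**2 / 1600 == 25.0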
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
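# Hedged illustration of the renaming above (not in the original script): a
# compiled-model key such as "_orig_mod.transformer.h.3.attn.c_attn.weight"
# first loses its "_orig_mod." prefix and is then rewritten substring by
# substring via new_layer_name_dict ("c_attn" -> "att_proj",
# "transformer." -> "", "h." -> "layers."), ending up as
# "layers.3.attn.att_proj.weight" in the HF state dict.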
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCAmelCase_ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
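# Hedged usage sketch; the script filename below is an assumption:
#
#   python convert_suno_to_hf.py text ./bark-semantic-small --is_small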
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Creates a set of `DataLoader`s for the `glue` dataset: the fold indices
    give the train/validation splits, and the original validation set serves
    as the held-out test split.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
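# Hedged mini-example of what `StratifiedKFold.split` yields (not part of the
# original script): each iteration produces disjoint train/validation index
# arrays with class proportions preserved.
#
#   from sklearn.model_selection import StratifiedKFold
#   import numpy as np
#
#   y = np.array([0, 0, 0, 1, 1, 1])
#   for train_idx, valid_idx in StratifiedKFold(n_splits=3).split(np.zeros(6), y):
#       print(train_idx, valid_idx)  # first fold: [1 2 4 5] [0 3]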
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
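# Hedged note on the slicing above (not in the original script): the fused
# in_proj_weight has shape (3 * hidden_size, hidden_size), stacking the query,
# key and value projections in that order, so [:h], [h:2h] and [-h:] recover
# the three separate matrices. For hidden_size = 256, rows 0-255 are q,
# 256-511 are k, and 512-767 are v.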
def prepare_img():
    # image of two cats from the COCO validation set, used to verify the conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
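# Hedged note (not part of the original file): with this lazy layout,
# `import transformers.models.clipseg` stays lightweight -- the torch-backed
# classes in `modeling_clipseg` are only imported on first attribute access,
# e.g. the first reference to `CLIPSegForImageSegmentation`.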
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Compute power (P = V * I), voltage (V = P / I) or current (I = P / V);
    pass exactly one of the three arguments as 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
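# Hedged worked examples (not in the original file), using P = V * I:
#
#   electric_power(voltage=0, current=2, power=5)   # result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)   # result(name='power', value=4.0)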
"""Resize an image with the nearest-neighbour interpolation algorithm."""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """
    Simplest and fastest version of image resizing: each destination pixel
    copies the nearest source pixel.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        """Get the source X coordinate for a destination X coordinate."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get the source Y coordinate for a destination Y coordinate."""
        return int(self.ratio_y * y)
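# Hedged illustration (not in the original file): scaling a 4x4 source down to
# 2x2 gives ratio_x == ratio_y == 2.0, so destination pixel (i, j) copies
# source pixel (2 * i, 2 * j) -- i.e. every second row and column survives.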
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ = 800, 600
lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1)
lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """
    Constructs a LeViT image processor: resize (shortest edge scaled by
    256/224), center crop, rescale and normalize.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. If size contains "shortest_edge", the shortest edge is
        resized to `int(256 / 224 * shortest_edge)`, preserving the aspect ratio.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale the pixel values of an image by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or batch of images so they can be fed to a LeViT model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
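# Hedged usage sketch (not part of the original file; the class name above is
# reconstructed, and "cats.png" is a placeholder path):
#
#   from PIL import Image
#
#   processor = LevitImageProcessor()
#   inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224])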
"""Overwrite expected output lines in test files, given a file of corrected lines."""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
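# Hedged illustration of the expected input format (not in the original file):
# each line of --correct_filename is "<file>;<class name>;<test name>;<line>",
# for example:
#
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_logits;expected_slice = torch.tensor([0.1, 0.2])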
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
lowerCAmelCase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with an id, a key, a parent pointer and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex in the neighbor's list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge to a destination vertex."""
        self.edges[vertex.id] = weight
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> list:
_SCREAMING_SNAKE_CASE : str = []
for u in graph:
_SCREAMING_SNAKE_CASE : int = math.inf
_SCREAMING_SNAKE_CASE : Tuple = None
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Union[str, Any] = graph[:]
while q:
_SCREAMING_SNAKE_CASE : Optional[int] = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_SCREAMING_SNAKE_CASE : Union[str, Any] = u
_SCREAMING_SNAKE_CASE : Any = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Iterator[tuple]:
for u in graph:
_SCREAMING_SNAKE_CASE : str = math.inf
_SCREAMING_SNAKE_CASE : Dict = None
_SCREAMING_SNAKE_CASE : Dict = 0
_SCREAMING_SNAKE_CASE : Tuple = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
_SCREAMING_SNAKE_CASE : Dict = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_SCREAMING_SNAKE_CASE : int = u
_SCREAMING_SNAKE_CASE : Optional[int] = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowerCamelCase_()-> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
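# --- Illustrative usage (added for this edit; not part of the original file) ---
# Builds a small connected graph and runs both Prim variants; the vertex ids and
# edge weights are made up for the example. `connect` expects 1-based ids.
#
#     x = [Vertex(i) for i in range(1, 6)]          # vertices "1".."5"
#     for a, b, w in [(1, 2, 3), (1, 3, 4), (2, 3, 1), (3, 4, 2), (4, 5, 5)]:
#         connect(x, a, b, w)
#     print(prim(x, x[0]))                          # MST as (child, parent) pairs
#     print(list(prim_heap(x, x[0])))               # same MST, heap-based variant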
| 635 | """simple docstring"""
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # note: the condition must test both substrings; `if "audio" and "qkv" in key`
        # would only test the second one
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
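# Example invocation (added for this edit; the script name and paths are
# hypothetical placeholders):
#
#     python convert_clap_checkpoint.py \
#         --checkpoint_path /path/to/clap_checkpoint.pt \
#         --pytorch_dump_folder_path ./clap-converted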
| 635 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 635 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 635 | 1 |
"""simple docstring"""
from math import factorial
lowerCAmelCase_ = {str(d): factorial(d) for d in range(10)}
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
return sum(DIGIT_FACTORIAL[d] for d in str(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase_()-> int:
_SCREAMING_SNAKE_CASE : Optional[Any] = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __SCREAMING_SNAKE_CASE ) if sum_of_digit_factorial(__SCREAMING_SNAKE_CASE ) == i )
if __name__ == "__main__":
print(F"{solution() = }")
| 635 | """simple docstring"""
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file: "
            "qa - a single line in the following format: question [tab] answer_list; "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument("--print_predictions", action="store_true", help="If True, prints predictions while evaluating.")
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
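# Example end-to-end invocation (added for this edit; the script name and file
# paths are hypothetical placeholders, the checkpoint id is a real hub model):
#
#     python eval_rag.py \
#         --model_name_or_path facebook/rag-token-nq \
#         --model_type rag_token \
#         --evaluation_set path/to/test.source \
#         --gold_data_path path/to/gold_data \
#         --predictions_path path/to/e2e_preds.txt \
#         --eval_mode e2e \
#         --gold_data_mode qa \
#         --n_docs 5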
| 635 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( _lowerCamelCase ):
"""simple docstring"""
a = ["image_processor", "tokenizer"]
a = "ChineseCLIPImageProcessor"
a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , _A : Optional[int]=None , _A : str=None , **_A : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A__ , )
_SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""feature_extractor""")
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(A__ , A__)
_SCREAMING_SNAKE_CASE : str = self.image_processor
def __call__( self : Optional[int] , _A : Any=None , _A : Optional[Any]=None , _A : List[Any]=None , **_A : List[str]):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(A__ , return_tensors=A__ , **A__)
if images is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(A__ , return_tensors=A__ , **A__)
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A__) , tensor_type=A__)
def _lowerCAmelCase ( self : Tuple , *_A : Optional[Any] , **_A : Optional[Any]):
"""simple docstring"""
return self.tokenizer.batch_decode(*A__ , **A__)
def _lowerCAmelCase ( self : str , *_A : Any , **_A : int):
"""simple docstring"""
return self.tokenizer.decode(*A__ , **A__)
@property
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , )
return self.image_processor_class | 700 | """simple docstring"""
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText."
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name"
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner"
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 | """simple docstring"""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
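# Illustrative usage (added for this edit; a sketch, not part of the original
# file — `image` is assumed to be a PIL image loaded elsewhere):
#
#     from transformers import ChineseCLIPProcessor
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#     # `inputs` now holds both `input_ids` (text) and `pixel_values` (image)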
| 635 | 0 |
"""simple docstring"""
from math import factorial
def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 20 )-> int:
_SCREAMING_SNAKE_CASE : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
_SCREAMING_SNAKE_CASE : Any = n // 2
return int(factorial(_SCREAMING_SNAKE_CASE ) / (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowerCAmelCase_ : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
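# Quick sanity check one could run (added for this edit, not in the original):
#
#     assert solution(1) == 2 and solution(2) == 6 and solution(3) == 20
#
# i.e. the central binomial coefficients 2, 6, 20 from rows 2, 4 and 6 of
# Pascal's triangle.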
| 702 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
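# Example invocation (added for this edit; the script name and output folder
# are hypothetical placeholders):
#
#     python convert_musicgen_checkpoint.py \
#         --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small-converted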
| 635 | 0 |
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 703 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "sew"
def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ):
"""simple docstring"""
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
_SCREAMING_SNAKE_CASE : Dict = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : str = conv_bias
_SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim)
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : str = squeeze_factor
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout
_SCREAMING_SNAKE_CASE : Tuple = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
_SCREAMING_SNAKE_CASE : str = final_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob
_SCREAMING_SNAKE_CASE : List[str] = mask_time_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob
_SCREAMING_SNAKE_CASE : int = mask_feature_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks
# ctc loss
_SCREAMING_SNAKE_CASE : int = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity
# sequence classification
_SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
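# The property above multiplies the conv strides together, i.e. the overall
# downsampling factor of the feature extractor (how many raw audio samples
# collapse into one encoder frame).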
| 635 | 0 |
"""simple docstring"""
import os
def solution()-> int:
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    # Bottom-up dynamic programming: each entry accumulates the best path sum
    # that can reach it from the row above
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
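# Minimal illustration of the same bottom-up DP on an inline triangle
# (hypothetical data, independent of triangle.txt): the best path in
# [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]] is 3 + 7 + 4 + 9 = 23.
def _demo_solution()-> int:
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )  # 23 for this classic example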
if __name__ == "__main__":
print(solution())
| 704 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_unispeech'''] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
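# With the _LazyModule swap above, importing this package stays cheap: the
# torch-backed classes listed in _import_structure are only materialized on
# first attribute access.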
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str )-> list[int]:
    return [ord(elem ) - 96 for elem in plain]
def decode(encoded: list[int] )-> str:
    return "".join(chr(elem + 96 ) for elem in encoded )
def main()-> None:
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
| 705 | """simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset , expected_features )-> None:
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
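# These helpers back every reader/writer test below; `parquet_path`, `tmp_path`
# and `shared_datadir` are pytest fixtures supplied by the suite's conftest and
# plugins (assumed, as in the upstream datasets test setup).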
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory , parquet_path , tmp_path )-> None:
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_parquet_dataset_features(features , parquet_path , tmp_path )-> None:
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_dataset_split(split , parquet_path , tmp_path )-> None:
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_parquet_dataset_path_type(path_type , parquet_path , tmp_path )-> None:
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict(dataset_dict , expected_features , splits=("train",) )-> None:
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory , parquet_path , tmp_path )-> None:
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"""train""": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_parquet_datasetdict_reader_features(features , parquet_path , tmp_path )-> None:
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"""train""": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split , parquet_path , tmp_path )-> None:
    if split:
        path = {split: parquet_path}
    else:
        split = """train"""
        path = {"""train""": parquet_path, """test""": parquet_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset , tmp_path )-> None:
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / """foo.parquet""" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir , tmp_path )-> None:
    image_path = str(shared_datadir / """test_image_rgb.jpg""" )
    data = {"""image""": [image_path]}
    features = Features({"""image""": Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature , expected )-> None:
    assert get_writer_batch_size(feature ) == expected
| 635 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict )-> None:
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb )-> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
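# Note: `lin_layer.weight.data = emb.weight.data` shares storage with the token
# embedding matrix, so the projection built here stays tied to the embeddings.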
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path )-> MaMaaaForConditionalGeneration:
    mam_aaa = torch.load(checkpoint_path , map_location="""cpu""" )
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 706 | """simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
    if not isinstance(__SCREAMING_SNAKE_CASE , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_string = str(abs(__SCREAMING_SNAKE_CASE ) )
        # one copy of the digits per position, each with that position removed
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 635 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _snake_case ( UpperCAmelCase_ ):
"""simple docstring"""
a = 'mvp'
a = ['past_key_values']
a = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str , _A : Optional[Any]=5_0_2_6_7 , _A : Optional[int]=1_0_2_4 , _A : Dict=1_2 , _A : Tuple=4_0_9_6 , _A : Dict=1_6 , _A : Tuple=1_2 , _A : int=4_0_9_6 , _A : List[Any]=1_6 , _A : Optional[Any]=0.0 , _A : Optional[int]=0.0 , _A : str="gelu" , _A : Union[str, Any]=1_0_2_4 , _A : int=0.1 , _A : Union[str, Any]=0.0 , _A : Optional[Any]=0.0 , _A : int=0.02 , _A : int=0.0 , _A : List[Any]=False , _A : Optional[int]=True , _A : Dict=1 , _A : Optional[Any]=0 , _A : List[Any]=2 , _A : Optional[int]=True , _A : Dict=2 , _A : List[str]=2 , _A : Tuple=False , _A : Optional[int]=1_0_0 , _A : Tuple=8_0_0 , **_A : int , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : str = d_model
_SCREAMING_SNAKE_CASE : Any = encoder_ffn_dim
_SCREAMING_SNAKE_CASE : Dict = encoder_layers
_SCREAMING_SNAKE_CASE : int = encoder_attention_heads
_SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
_SCREAMING_SNAKE_CASE : List[Any] = decoder_layers
_SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads
_SCREAMING_SNAKE_CASE : List[Any] = dropout
_SCREAMING_SNAKE_CASE : List[str] = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : List[str] = activation_function
_SCREAMING_SNAKE_CASE : int = init_std
_SCREAMING_SNAKE_CASE : str = encoder_layerdrop
_SCREAMING_SNAKE_CASE : str = decoder_layerdrop
_SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
_SCREAMING_SNAKE_CASE : Any = use_cache
_SCREAMING_SNAKE_CASE : List[Any] = encoder_layers
_SCREAMING_SNAKE_CASE : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_SCREAMING_SNAKE_CASE : int = use_prompt
_SCREAMING_SNAKE_CASE : List[str] = prompt_length
_SCREAMING_SNAKE_CASE : List[str] = prompt_mid_dim
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _lowercase):
_SCREAMING_SNAKE_CASE : Any = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""")
| 707 | """simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : str = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : List[Any] = -1
_SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0])
_SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A)
_SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A)
thread.start()
_SCREAMING_SNAKE_CASE : Any = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :]
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""")
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A)
_SCREAMING_SNAKE_CASE : int = -1
_SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A)
model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n"
_SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Tuple = -1
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001)
_SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_A):
_SCREAMING_SNAKE_CASE : str = """"""
for new_text in streamer:
streamer_text += new_text
| 635 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowerCAmelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k )-> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
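# Worked example of the ordered substitutions above (illustrative key):
# "encoder/ffn/dense_1/kernel" -> "encoder.ffn.dense_1.kernel" ('/' -> '.')
# -> "encoder.fc2.kernel" ("ffn.dense_1." -> "fc2.") -> "encoder.fc2.weight".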
def convert_pegasus(tf_weights: dict , cfg_updates: dict )-> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000" )-> Dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars , desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path , save_dir )-> None:
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates['''task_specific_params'''] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(sd , Path(save_dir ) / """pytorch_model.bin""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 708 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "facebook/bart-large-mnli"
a = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a = "text_classifier"
a = AutoTokenizer
a = AutoModelForSequenceClassification
a = ["text", ["text"]]
a = ["text"]
    def _lowerCAmelCase ( self : int):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail"""):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")
    def _lowerCAmelCase ( self : Optional[Any] , text , labels):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
    def _lowerCAmelCase ( self : Tuple , outputs):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
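# Sketch of the zero-shot flow implemented above: each candidate label is paired
# with the text as an NLI hypothesis ("This example is <label>"), the model scores
# every pair, and the label with the highest entailment logit is returned.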
| 635 | 0 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    """simple docstring"""
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        """simple docstring"""
        raise NotImplementedError
    def run(self , trainer , n_trials , direction , **kwargs):
        """simple docstring"""
        raise NotImplementedError
    def default_hp_space(self , trial):
        """simple docstring"""
        raise NotImplementedError
    def ensure_available(self):
        """simple docstring"""
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""")
    @classmethod
    def pip_install(cls):
        """simple docstring"""
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = "optuna"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_optuna_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        """simple docstring"""
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        """simple docstring"""
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = "ray"
    pip_package = "'ray[tune]'"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_ray_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        """simple docstring"""
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        """simple docstring"""
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = "sigopt"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_sigopt_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        """simple docstring"""
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        """simple docstring"""
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = "wandb"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_wandb_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        """simple docstring"""
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        """simple docstring"""
        return default_hp_space_wandb(trial)
lowerCAmelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend()-> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                F"""{len(available_backends )} hyperparameter search backends available. Using {name} as the default.""" )
        return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
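# Illustrative use of the registry above (hypothetical driver code): resolve a
# backend by name, make sure it is installed, then hand the search off to it.
# backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("""optuna""")]()
# backend.ensure_available()
# backend.run(trainer, n_trials=20, direction="minimize")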
| 709 | """simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _lowerCAmelCase ( self : str):
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("""nielsr/rvlcdip-demo""")
        image = dataset["""train"""][0]["""image"""].convert("""RGB""")
        inputs = image_processor(image , return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 1_6))
        self.assertEqual(logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4))
| 635 | 0 |
"""simple docstring"""
def binary_recursive(decimal: int )-> str:
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main(number: str )-> str:
    number = str(number ).strip()
    if not number:
        raise ValueError("""No input value was provided""" )
    negative = "-" if number.startswith("""-""" ) else ""
    number = number.lstrip("""-""" )
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""" )
    return F"""{negative}0b{binary_recursive(int(number ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 710 | """simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "M-CLIP"
def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = transformerDimSize
_SCREAMING_SNAKE_CASE : List[str] = imageDimSize
super().__init__(**_A)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = MCLIPConfig
def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict):
"""simple docstring"""
super().__init__(_A , *_A , **_A)
_SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A)
_SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims)
def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(_A), embs
| 635 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowerCAmelCase_ = data_utils.TransfoXLTokenizer
lowerCAmelCase_ = data_utils.TransfoXLCorpus
lowerCAmelCase_ = data_utils
lowerCAmelCase_ = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file )-> None:
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , """rb""" ) as fp:
            corpus = pickle.load(fp , encoding="""latin1""" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("""vocab""" , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 711 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int )-> str:
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    # set the Decimal context, then run the Chudnovsky series
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426_880 * Decimal(10_005 ).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
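# The Chudnovsky series adds roughly 14 digits per term; quick check against the
# well-known expansion:
assert pi(10 ) == """3.14159265"""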
if __name__ == "__main__":
    n = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _lowerCAmelCase ( self : Dict):
        """simple docstring"""
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""")
        input_ids = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.int32 , ) # J'aime le camembert !
        output = model(input_ids)["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 1_0, 7_6_8))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 712 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path )-> None:
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_46_94
        config.cell_selection_preference = 0.20_79_51
        config.huber_loss_delta = 0.12_11_94
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_35_25_13
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.45_19
        config.cell_selection_preference = 0.90_34_21
        config.huber_loss_delta = 2_22.0_88
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_31_41
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F"""Task {task} not supported.""" )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
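# Illustrative invocation (script name and paths are hypothetical):
#   python convert_tapas_checkpoint.py --task WTQ \
#     --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#     --tapas_config_file ./tapas_wtq/config.json \
#     --pytorch_dump_path ./tapas-wtq-pytorch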
| 635 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 | """simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray )-> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray )-> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests()-> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
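# One more illustrative property: for the identity matrix the Rayleigh quotient
# is exactly 1 for any nonzero v, since v* I v == v* v.
assert rayleigh_quotient(np.eye(3 ) , np.array([[1.0], [2.0], [3.0]] ) ) == 1.0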
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 635 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "blip_text_model"
def __init__( self : Optional[int] , _A : Dict=3_0_5_2_4 , _A : Union[str, Any]=7_6_8 , _A : Optional[int]=7_6_8 , _A : Tuple=3_0_7_2 , _A : List[Any]=7_6_8 , _A : Tuple=1_2 , _A : Tuple=8 , _A : List[str]=5_1_2 , _A : Optional[Any]="gelu" , _A : Optional[Any]=1e-12 , _A : Optional[Any]=0.0 , _A : str=0.0 , _A : List[Any]=0.02 , _A : int=3_0_5_2_2 , _A : Union[str, Any]=2 , _A : List[Any]=0 , _A : str=1_0_2 , _A : int=True , _A : Tuple=True , **_A : List[str] , ):
"""simple docstring"""
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , sep_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = encoder_hidden_size
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = projection_dim
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = is_decoder
_SCREAMING_SNAKE_CASE = use_cache
@classmethod
def _lowerCAmelCase ( cls : Any , _A : Union[str, os.PathLike] , **_A : Optional[Any]):
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase)
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""") == "blip":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "blip_vision_model"
def __init__( self : Optional[int] , _A : List[Any]=7_6_8 , _A : Dict=3_0_7_2 , _A : Any=5_1_2 , _A : Optional[Any]=1_2 , _A : List[Any]=1_2 , _A : List[str]=3_8_4 , _A : Optional[Any]=1_6 , _A : Tuple="gelu" , _A : Optional[Any]=1e-5 , _A : int=0.0 , _A : Union[str, Any]=1e-10 , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase)
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = projection_dim
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def _lowerCAmelCase ( cls : str , _A : Union[str, os.PathLike] , **_A : List[str]):
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""") == "blip":
_SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "blip"
a = True
def __init__( self : Tuple , _A : Any=None , _A : List[str]=None , _A : int=5_1_2 , _A : Dict=2.6_592 , _A : int=2_5_6 , **_A : str , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase)
if text_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""")
if vision_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""")
_SCREAMING_SNAKE_CASE = BlipTextConfig(**_UpperCAmelCase)
_SCREAMING_SNAKE_CASE = BlipVisionConfig(**_UpperCAmelCase)
_SCREAMING_SNAKE_CASE = self.vision_config.hidden_size
_SCREAMING_SNAKE_CASE = projection_dim
_SCREAMING_SNAKE_CASE = logit_scale_init_value
_SCREAMING_SNAKE_CASE = 1.0
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = image_text_hidden_size
@classmethod
def _lowerCAmelCase ( cls : List[Any] , _A : BlipTextConfig , _A : BlipVisionConfig , **_A : Any):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
_SCREAMING_SNAKE_CASE = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
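# Composition sketch (keyword names taken from the constructor body above; the
# values are illustrative only):
#   config = BlipConfig(text_config={"vocab_size": 30524}, vision_config={})
# BlipConfig rebuilds BlipTextConfig / BlipVisionConfig from these plain dicts.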
| 714 | """simple docstring"""
from __future__ import annotations
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
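# Worked example (illustrative values): with electron_conc=25, hole_conc=100
# and intrinsic_conc=0, the mass-action law n_i = sqrt(n * p) gives
# ("intrinsic_conc", 50.0).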
if __name__ == "__main__":
import doctest
doctest.testmod()
| 635 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str:
_SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_SCREAMING_SNAKE_CASE : Tuple = [144, 192, 240]
_SCREAMING_SNAKE_CASE : str = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_SCREAMING_SNAKE_CASE : List[Any] = [96, 120, 144]
_SCREAMING_SNAKE_CASE : Optional[int] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [64, 80, 96]
_SCREAMING_SNAKE_CASE : Tuple = [16, 16, 24, 48, 64, 80, 320]
_SCREAMING_SNAKE_CASE : Any = 0.05
_SCREAMING_SNAKE_CASE : Tuple = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = 512
_SCREAMING_SNAKE_CASE : Optional[int] = 16
_SCREAMING_SNAKE_CASE : Optional[Any] = 21
_SCREAMING_SNAKE_CASE : Dict = """pascal-voc-id2label.json"""
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_000
_SCREAMING_SNAKE_CASE : Tuple = """imagenet-1k-id2label.json"""
_SCREAMING_SNAKE_CASE : Tuple = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE : Dict = {int(__A ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE : Tuple = idalabel
_SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str:
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
_SCREAMING_SNAKE_CASE : Any = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
_SCREAMING_SNAKE_CASE : str = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
_SCREAMING_SNAKE_CASE : int = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
_SCREAMING_SNAKE_CASE : int = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
_SCREAMING_SNAKE_CASE : str = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
_SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
_SCREAMING_SNAKE_CASE : Any = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
_SCREAMING_SNAKE_CASE : Optional[Any] = """mobilevit.""" + name
return name
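# Illustrative trace of the renaming above: "conv_1.conv.weight" becomes
# "conv_stem.convolution.weight", then gains the "mobilevit." prefix for
# non-base models: "mobilevit.conv_stem.convolution.weight".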
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> int:
if base_model:
_SCREAMING_SNAKE_CASE : Union[str, Any] = """"""
else:
_SCREAMING_SNAKE_CASE : Dict = """mobilevit."""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE : Optional[Any] = orig_state_dict.pop(__A )
if key[:8] == "encoder.":
_SCREAMING_SNAKE_CASE : str = key[8:]
if "qkv" in key:
_SCREAMING_SNAKE_CASE : List[str] = key.split(""".""" )
_SCREAMING_SNAKE_CASE : List[Any] = int(key_split[0][6:] ) - 1
_SCREAMING_SNAKE_CASE : str = int(key_split[3] )
_SCREAMING_SNAKE_CASE : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
_SCREAMING_SNAKE_CASE : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_SCREAMING_SNAKE_CASE : int = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
_SCREAMING_SNAKE_CASE : Union[str, Any] = val[:dim, :]
_SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE : Any = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE : Any = val[:dim]
_SCREAMING_SNAKE_CASE : str = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE : str = val[-dim:]
else:
_SCREAMING_SNAKE_CASE : List[str] = val
return orig_state_dict
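# The qkv branch above slices a fused attention weight of shape (3 * dim, hidden)
# row-wise: rows [0, dim) form the query projection, [dim, 2 * dim) the key
# projection, and [2 * dim, 3 * dim) the value projection.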
def lowerCamelCase_()-> Any:
_SCREAMING_SNAKE_CASE : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> Dict:
_SCREAMING_SNAKE_CASE : List[str] = get_mobilevit_config(__A )
# load original state_dict
_SCREAMING_SNAKE_CASE : int = torch.load(__A , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
_SCREAMING_SNAKE_CASE : List[str] = MobileViTForSemanticSegmentation(__A ).eval()
else:
_SCREAMING_SNAKE_CASE : List[str] = MobileViTForImageClassification(__A ).eval()
_SCREAMING_SNAKE_CASE : List[str] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# Check outputs on an image, prepared by MobileViTImageProcessor
_SCREAMING_SNAKE_CASE : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_SCREAMING_SNAKE_CASE : Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE : Any = model(**__A )
_SCREAMING_SNAKE_CASE : Any = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
_SCREAMING_SNAKE_CASE : Dict = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , __A , atol=1e-4 )
Path(__A ).mkdir(exist_ok=__A )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if push_to_hub:
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
_SCREAMING_SNAKE_CASE : Dict = model_mapping[mobilevit_name]
image_processor.push_to_hub(__A , organization="""apple""" )
model.push_to_hub(__A , organization="""apple""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 715 | """simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
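# A hedged usage sketch (filename and flag values are illustrative, not from
# the source): once `accelerate config` has been run, the script would be
# launched roughly as:
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16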
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str:
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict(
{
"""train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ),
"""validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_SCREAMING_SNAKE_CASE : str = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_SCREAMING_SNAKE_CASE : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
_SCREAMING_SNAKE_CASE : Any = 8
else:
_SCREAMING_SNAKE_CASE : Optional[int] = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader, test_dataloader
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
# New Code #
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
# Download the dataset
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE : Tuple = config["""lr"""]
_SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] )
_SCREAMING_SNAKE_CASE : int = int(config["""seed"""] )
_SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] )
_SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_SCREAMING_SNAKE_CASE : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE
_SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__SCREAMING_SNAKE_CASE )
# New Code #
# Create our folds:
_SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_SCREAMING_SNAKE_CASE : Optional[Any] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
_SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = outputs.loss
_SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE )
# New Code #
# We also run predictions on the test set at the very end
_SCREAMING_SNAKE_CASE : str = []
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
_SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_()-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" )
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 635 | 0 |
"""simple docstring"""
def lowerCamelCase_( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[Any]:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
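# Worked example: a 10 kg mass moving at 3 m/s carries
# 0.5 * 10 * abs(3) * abs(3) = 45.0 J; abs() makes the sign of the velocity
# irrelevant.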
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 716 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
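# Note on the _LazyModule pattern above: importing a symbol such as
# CLIPSegModel from this module defers the torch-backed submodule import until
# first attribute access, so `import transformers` stays cheap and the optional
# torch dependency is only resolved when actually needed.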
| 635 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''YolosFeatureExtractor''']
lowerCAmelCase_ = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717 | """simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class _snake_case :
"""simple docstring"""
def __init__( self : int , _A : List[Any] , _A : int , _A : int):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError("""Destination width/height should be > 0""")
_SCREAMING_SNAKE_CASE : str = img
_SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1]
_SCREAMING_SNAKE_CASE : Tuple = img.shape[0]
_SCREAMING_SNAKE_CASE : Any = dst_width
_SCREAMING_SNAKE_CASE : Any = dst_height
_SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w
_SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h
_SCREAMING_SNAKE_CASE : Optional[Any] = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5
)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
for i in range(self.dst_h):
for j in range(self.dst_w):
_SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)]
def _lowerCAmelCase ( self : int , _A : int):
"""simple docstring"""
return int(self.ratio_x * x)
def _lowerCAmelCase ( self : str , _A : int):
"""simple docstring"""
return int(self.ratio_y * y)
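# Worked example of the mapping above: downscaling an 800x600 source to
# 400x300 gives ratio_x = ratio_y = 2.0, so output pixel (row=10, col=20)
# samples source pixel (row=get_y(10)=20, col=get_x(20)=40).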
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ = 800, 600
lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1)
lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
| 635 | 0 |
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
if n == 1 or not isinstance(a_ , a_ ):
return 0
elif n == 2:
return 1
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
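# Sanity check: with this 0-indexed convention the sequence runs
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, ..., so calling the function above
# with n=10 returns 55.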
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : List[Any] = 2
while digits < n:
index += 1
_SCREAMING_SNAKE_CASE : Optional[Any] = len(str(fibonacci(a_ ) ) )
return index
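# e.g. for n=3 digits this returns 12: fib(11) = 89 still has two digits,
# while fib(12) = 144 is the first three-digit Fibonacci number.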
def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 1_000 )-> int:
return fibonacci_digits_index(a_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 718 | """simple docstring"""
import argparse
from collections import defaultdict
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int:
_SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines()
_SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}("""
_SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}("""
_SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}"""
_SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}"""
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
_SCREAMING_SNAKE_CASE : Dict = []
for line in lines:
if line.startswith(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = True
elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = True
elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )):
_SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_SCREAMING_SNAKE_CASE : int = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_SCREAMING_SNAKE_CASE : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_SCREAMING_SNAKE_CASE : Optional[int] = False
else:
new_lines.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as f:
for line in new_lines:
f.write(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]:
if fail is not None:
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()}
else:
_SCREAMING_SNAKE_CASE : str = None
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
_SCREAMING_SNAKE_CASE : str = f.readlines()
_SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE )
for line in correct_lines:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
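# Expected line format in --correct_filename, matching the split(";") above
# (path, class and test names below are illustrative only):
#   tests/test_modeling_foo.py;FooModelTest;test_something;expected = [1, 2]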
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
lowerCAmelCase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 | """simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase_ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*"""
_SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
_SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
_SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2
_SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_SCREAMING_SNAKE_CASE : Dict = value
_SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3
_SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim]
_SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2]
_SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :]
_SCREAMING_SNAKE_CASE : Dict = query_layer
_SCREAMING_SNAKE_CASE : List[Any] = key_layer
_SCREAMING_SNAKE_CASE : Dict = value_layer
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = value
return model_state_dict
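# Illustration of the sequential remapping above: a key containing
# "sequential.3." becomes "layers.1.linear." (index // 3), on the assumption
# that every third nn.Sequential slot holds the actual Linear layer.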
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
_SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict()
_SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = ClapConfig()
_SCREAMING_SNAKE_CASE : Tuple = enable_fusion
_SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
lowerCAmelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 635 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase_ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase_ = object()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : int = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE ) + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = [x.match(__SCREAMING_SNAKE_CASE ) for x, y in zip(__SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(__SCREAMING_SNAKE_CASE ):
return True
return False
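# e.g. the query ("mlp", "c_fc", "kernel") matches the parameter path
# ("transformer", "h", "0", "mlp", "c_fc", "kernel"): the compiled patterns
# line up with the tail of the key tuple, so the matcher returns True.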
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]:
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for rule, replacement in rules:
if _match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def lowerCamelCase_()-> int:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , __SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("""mp""" , __SCREAMING_SNAKE_CASE )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__SCREAMING_SNAKE_CASE , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , __SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__SCREAMING_SNAKE_CASE , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , __SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str:
_SCREAMING_SNAKE_CASE : str = _get_partition_rules()
_SCREAMING_SNAKE_CASE : Any = _replacement_rules(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = {k: _unmatched for k in flatten_dict(__SCREAMING_SNAKE_CASE )}
_SCREAMING_SNAKE_CASE : Dict = {k: replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__SCREAMING_SNAKE_CASE ) )
| 720 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , )
assert hasattr(self , """env""")
def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]):
"""simple docstring"""
TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
_SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_SCREAMING_SNAKE_CASE : int = (
Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''ConditionalDetrFeatureExtractor''']
lowerCAmelCase_ = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721 | """simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any:
return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths )
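# e.g. with metric_fn=exact_match_score, prediction "Paris" and ground truths
# ["paris", "Lyon"] this yields 1.0, assuming the metric normalizes case as in
# the standard SQuAD evaluation script.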
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]:
_SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Dict = []
if args.gold_data_mode == "qa":
_SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE )
for answer_list in data[1]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE )
answers.append(__SCREAMING_SNAKE_CASE )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references]
_SCREAMING_SNAKE_CASE : Optional[int] = 0
for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
total += 1
em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = args.k
_SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
def strip_title(__SCREAMING_SNAKE_CASE ):
if title.startswith("""\"""" ):
_SCREAMING_SNAKE_CASE : Optional[int] = title[1:]
if title.endswith("""\"""" ):
_SCREAMING_SNAKE_CASE : str = title[:-1]
return title
_SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device )
_SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0]
_SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever(
__SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
for docs in all_docs:
_SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) )
return provenance_strings
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]:
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
if args.print_predictions:
for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
return answers
def lowerCamelCase_()-> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
_SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def main(args )-> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if args.model_type is None:
_SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
_SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name
if args.index_path is not None:
_SCREAMING_SNAKE_CASE : Any = args.index_path
else:
_SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration
_SCREAMING_SNAKE_CASE : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_e2e if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
_SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
_SCREAMING_SNAKE_CASE : str = []
for line in tqdm(__SCREAMING_SNAKE_CASE ):
questions.append(line.strip() )
if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size:
_SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" )
preds_file.flush()
_SCREAMING_SNAKE_CASE : Any = []
if len(__SCREAMING_SNAKE_CASE ) > 0:
_SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
preds_file.flush()
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCAmelCase_ = get_args()
main(args)
| 635 | 0 |
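For the retrieval evaluation mode above, each prediction line holds tab-joined document titles, and precision@k measures how many of the top-k predicted titles appear in the gold provenance. A minimal, self-contained sketch of that comparison; the function name and file-path arguments are illustrative, not taken from the script:

def precision_at_k(preds_path, gold_path, k=1):
    # Both files hold one example per line: tab-separated document titles.
    with open(preds_path) as p, open(gold_path) as g:
        hypos = [line.strip() for line in p]
        references = [line.strip() for line in g]
    em, total = 0.0, 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])  # top-k predicted titles
        ref_provenance = set(reference.split("\t"))  # gold titles
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    return 100.0 * em / total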
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
"""simple docstring"""
def __init__( self : Any , _A : Optional[Any] , _A : Tuple=1_3 , _A : Optional[Any]=7 , _A : Dict=True , _A : Optional[Any]=True , _A : str=True , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Tuple=False , _A : Optional[int]=False , _A : str=False , _A : int=2 , _A : Union[str, Any]=9_9 , _A : int=0 , _A : Dict=3_2 , _A : List[str]=5 , _A : Any=4 , _A : str=0.1 , _A : Any=0.1 , _A : int=5_1_2 , _A : List[Any]=2 , _A : Union[str, Any]=0.02 , _A : Optional[Any]=2 , _A : List[str]=4 , _A : Optional[int]="last" , _A : str=True , _A : List[str]=None , _A : List[Any]=0 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
_SCREAMING_SNAKE_CASE : int = is_training
_SCREAMING_SNAKE_CASE : Dict = use_input_lengths
_SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids
_SCREAMING_SNAKE_CASE : List[Any] = use_labels
_SCREAMING_SNAKE_CASE : List[Any] = gelu_activation
_SCREAMING_SNAKE_CASE : str = sinusoidal_embeddings
_SCREAMING_SNAKE_CASE : Dict = causal
_SCREAMING_SNAKE_CASE : Dict = asm
_SCREAMING_SNAKE_CASE : str = n_langs
_SCREAMING_SNAKE_CASE : Tuple = vocab_size
_SCREAMING_SNAKE_CASE : Dict = n_special
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : int = max_position_embeddings
_SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
_SCREAMING_SNAKE_CASE : str = num_labels
_SCREAMING_SNAKE_CASE : Any = num_choices
_SCREAMING_SNAKE_CASE : List[Any] = summary_type
_SCREAMING_SNAKE_CASE : Tuple = use_proj
_SCREAMING_SNAKE_CASE : Dict = scope
_SCREAMING_SNAKE_CASE : str = bos_token_id
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_SCREAMING_SNAKE_CASE : str = None
if self.use_input_lengths:
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
_SCREAMING_SNAKE_CASE : Optional[Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , 2).float()
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices)
_SCREAMING_SNAKE_CASE : int = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Any , _A : List[str] , _A : Optional[int] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = XLMModel(config=A_)
model.to(A_)
model.eval()
_SCREAMING_SNAKE_CASE : Optional[Any] = model(A_ , lengths=A_ , langs=A_)
_SCREAMING_SNAKE_CASE : Tuple = model(A_ , langs=A_)
_SCREAMING_SNAKE_CASE : Tuple = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self : int , _A : str , _A : Union[str, Any] , _A : Any , _A : Any , _A : Any , _A : Union[str, Any] , _A : List[str] , _A : List[str] , _A : List[str] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = XLMWithLMHeadModel(A_)
model.to(A_)
model.eval()
_SCREAMING_SNAKE_CASE : Any = model(A_ , token_type_ids=A_ , labels=A_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : Tuple , _A : str , _A : int , _A : str , _A : Optional[Any] , _A : Any , _A : Any , _A : Dict , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = XLMForQuestionAnsweringSimple(A_)
model.to(A_)
model.eval()
_SCREAMING_SNAKE_CASE : str = model(A_)
_SCREAMING_SNAKE_CASE : int = model(A_ , start_positions=A_ , end_positions=A_)
_SCREAMING_SNAKE_CASE : Dict = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Dict , _A : Optional[Any] , _A : List[Any] , _A : List[str] , _A : List[Any] , _A : Optional[Any] , _A : str , _A : Any , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = XLMForQuestionAnswering(A_)
model.to(A_)
model.eval()
_SCREAMING_SNAKE_CASE : Optional[int] = model(A_)
_SCREAMING_SNAKE_CASE : Optional[Any] = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , p_mask=A_ , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , )
((_SCREAMING_SNAKE_CASE ) , ) : Optional[int] = result_with_labels.to_tuple()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(A_ , start_positions=A_ , end_positions=A_)
((_SCREAMING_SNAKE_CASE ) , ) : Optional[int] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self : int , _A : Union[str, Any] , _A : Tuple , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : Union[str, Any] , _A : str , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = XLMForSequenceClassification(A_)
model.to(A_)
model.eval()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(A_)
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(A_ , labels=A_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict , _A : Any , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
_SCREAMING_SNAKE_CASE : Optional[int] = XLMForTokenClassification(A_)
model.to(A_)
model.eval()
_SCREAMING_SNAKE_CASE : List[Any] = model(A_ , attention_mask=A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self : Optional[int] , _A : int , _A : str , _A : int , _A : Tuple , _A : List[Any] , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : int , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
_SCREAMING_SNAKE_CASE : str = XLMForMultipleChoice(config=A_)
model.to(A_)
model.eval()
_SCREAMING_SNAKE_CASE : Dict = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_SCREAMING_SNAKE_CASE : Any = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_SCREAMING_SNAKE_CASE : List[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_SCREAMING_SNAKE_CASE : Any = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) : Union[str, Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class _snake_case ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
a = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self : List[str] , _A : Optional[int] , _A : Optional[int] , _A : Tuple , _A : Any , _A : str):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self : Optional[int] , _A : Optional[int] , _A : Tuple , _A : str=False):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = super()._prepare_for_class(A_ , A_ , return_labels=A_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_)
_SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_)
return inputs_dict
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = XLMModelTester(self)
_SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=A_ , emb_dim=3_7)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_)
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_)
def _lowerCAmelCase ( self : Any , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[Any]=False , _A : Tuple=1):
"""simple docstring"""
self.assertIsInstance(A_ , A_)
self.assertListEqual(
[isinstance(A_ , A_) for iter_attentions in attentions] , [True] * len(A_))
self.assertEqual(len(A_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(A_):
# adds PAD dummy token
_SCREAMING_SNAKE_CASE : str = min_length + idx + 1
_SCREAMING_SNAKE_CASE : int = min_length + idx + 1
_SCREAMING_SNAKE_CASE : str = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A_))
def _lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Tuple=False , _A : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(A_ , A_)
self.assertListEqual(
[isinstance(A_ , A_) for iter_hidden_states in hidden_states] , [True] * len(A_) , )
self.assertEqual(len(A_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(A_):
# adds PAD dummy token
_SCREAMING_SNAKE_CASE : List[str] = min_length + idx + 1
_SCREAMING_SNAKE_CASE : List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A_) , )
pass
@slow
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = XLMModel.from_pretrained(A_)
self.assertIsNotNone(A_)
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""")
model.to(A_)
_SCREAMING_SNAKE_CASE : int = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=A_) # the president
_SCREAMING_SNAKE_CASE : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_SCREAMING_SNAKE_CASE : List[Any] = model.generate(A_ , do_sample=A_)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A_)
| 700 | """simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )-> Union[str, Any]:
set_seed(3 )
# generate train_data and objective_set
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
_SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE )
print("""computing perplexity on objective set""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item()
print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )-> Optional[int]:
set_seed(42 )
# Load pre-trained model
_SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE )
# Train secondary learner
_SCREAMING_SNAKE_CASE : Any = train_secondary_learner(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(model , train_dataset , test_dataset , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(__SCREAMING_SNAKE_CASE )
secondary_learner.eval()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : int = []
# Compute the performance of the transformer model at the beginning
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
for epoch in range(int(__SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(__SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 )
_SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = True
if secondary_learner is not None:
_SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward(
torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
_SCREAMING_SNAKE_CASE : List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main()-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_SCREAMING_SNAKE_CASE : int = training_secondary_learner(
__SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 635 | 0 |
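The fine-tuning loop above gates backprop on the secondary learner's predicted information gain, relaxing the threshold to -1 after ten update steps. A simplified, standalone sketch of that filtering policy; igf_filter and predict_ig are illustrative names, and the decay here counts accepted contexts rather than optimizer steps:

import random

def igf_filter(contexts, predict_ig, threshold=1.0, decay_step=10):
    # Keep a context only while its predicted information gain clears the
    # threshold; after decay_step acceptances the filter is effectively off.
    kept, accepted = [], 0
    for context in contexts:
        if accepted == decay_step:
            threshold = -1.0
        if predict_ig(context) >= threshold:
            kept.append(context)
            accepted += 1
    return kept

print(len(igf_filter(range(100), lambda _: random.uniform(-2.0, 2.0))))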
"""simple docstring"""
def ugly_numbers(n: int )-> int:
    # Merge the 2x, 3x and 5x multiples of the ugly numbers generated so far,
    # advancing one pointer per factor.
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
| 701 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( ProcessorMixin ):
"""simple docstring"""
a = ["image_processor", "tokenizer"]
a = "ChineseCLIPImageProcessor"
a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
_SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""")
_SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_A , _A)
_SCREAMING_SNAKE_CASE : Dict = self.image_processor
def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A)
if images is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A)
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A) , tensor_type=_A)
def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A)
def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any):
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A)
@property
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , )
return self.image_processor_class
| 635 | 0 |
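The processor above simply fans a call out to its tokenizer and image processor and merges the results, so one object prepares both modalities. A usage sketch; the checkpoint name is an assumption (any Chinese-CLIP checkpoint that ships both components would do) and downloading it requires network access:

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")  # assumed checkpoint id
image = Image.new("RGB", (224, 224))  # stand-in for a real photo
both = processor(text=["一张照片"], images=image, return_tensors="pt")
text_only = processor(text=["一张照片"], return_tensors="pt")
image_only = processor(images=image, return_tensors="pt")
print(sorted(both.keys()))  # token ids and masks from the tokenizer, pixel_values from the image processor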
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ : Tuple = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser ):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )

def pytest_terminal_summary(terminalreporter ):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
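Both hooks above delegate to shared helpers inside diffusers. A rough standalone sketch of what such hooks do, assuming no shared helpers; this approximates their behavior rather than reproducing the helpers' actual implementation, and it should not be combined with the delegating versions above:

def pytest_addoption(parser):
    # Register the flag that is read back at session end.
    parser.addoption("--make-reports", action="store", default=None,
                     help="generate report files named after this id")

def pytest_terminal_summary(terminalreporter):
    report_id = terminalreporter.config.getoption("--make-reports")
    if report_id:
        terminalreporter.write_sep("=", f"writing reports for run {report_id}")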
| 702 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ['''model.decoder.embed_positions.weights''']
def rename_keys(name )-> Optional[int]:
if "emb" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def rename_state_dict(state_dict , hidden_size )-> Tuple[Dict, Dict]:
_SCREAMING_SNAKE_CASE : str = list(state_dict.keys() )
_SCREAMING_SNAKE_CASE : Tuple = {}
for key in keys:
_SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
_SCREAMING_SNAKE_CASE : str = val[:hidden_size, :]
_SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_SCREAMING_SNAKE_CASE : int = val
else:
_SCREAMING_SNAKE_CASE : Dict = val
return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint )-> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" )-> str:
_SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict(
__SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
_SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
_SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
_SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE )
# check we can do a forward pass
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
_SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
_SCREAMING_SNAKE_CASE : Optional[Any] = 2_048
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
# set other default generation config params
_SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate )
_SCREAMING_SNAKE_CASE : Tuple = True
_SCREAMING_SNAKE_CASE : int = 3.0
if pytorch_dump_folder is not None:
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__SCREAMING_SNAKE_CASE )
processor.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 635 | 0 |
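The rename step above slices each fused in_proj_weight of shape (3 * hidden_size, hidden_size) into separate query, key and value projections. A toy demonstration of exactly that slicing:

import torch

hidden_size = 4  # toy width; the "small" checkpoint above uses 1_024
fused = torch.randn(3 * hidden_size, hidden_size)
q_proj = fused[:hidden_size, :]
k_proj = fused[hidden_size : 2 * hidden_size, :]
v_proj = fused[-hidden_size:, :]
# Stacking the three slices back reproduces the fused matrix.
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), fused)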
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class _snake_case ( PretrainedConfig ):
"""simple docstring"""
a = "gptsan-japanese"
a = [
"past_key_values",
]
a = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[Any] , _A : Optional[int]=3_6_0_0_0 , _A : Optional[int]=1_2_8_0 , _A : Optional[Any]=1_0_2_4 , _A : Optional[Any]=8_1_9_2 , _A : Optional[Any]=4_0_9_6 , _A : str=1_2_8 , _A : List[str]=1_0 , _A : Union[str, Any]=0 , _A : Optional[Any]=1_6 , _A : str=1_6 , _A : Optional[int]=1_2_8 , _A : List[str]=0.0 , _A : Any=1e-5 , _A : List[Any]=False , _A : Optional[Any]=0.0 , _A : List[str]="float32" , _A : List[str]=False , _A : int=False , _A : int=False , _A : Tuple=0.002 , _A : str=False , _A : str=True , _A : List[Any]=3_5_9_9_8 , _A : Any=3_5_9_9_5 , _A : Dict=3_5_9_9_9 , **_A : Optional[Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = d_model
_SCREAMING_SNAKE_CASE : Optional[Any] = d_ff
_SCREAMING_SNAKE_CASE : int = d_ext
_SCREAMING_SNAKE_CASE : Optional[int] = d_spout
_SCREAMING_SNAKE_CASE : str = num_switch_layers
_SCREAMING_SNAKE_CASE : int = num_ext_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_switch_layers + num_ext_layers
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads
_SCREAMING_SNAKE_CASE : Any = num_experts
_SCREAMING_SNAKE_CASE : Dict = expert_capacity
_SCREAMING_SNAKE_CASE : List[Any] = dropout_rate
_SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
_SCREAMING_SNAKE_CASE : Union[str, Any] = router_bias
_SCREAMING_SNAKE_CASE : str = router_jitter_noise
_SCREAMING_SNAKE_CASE : List[str] = router_dtype
_SCREAMING_SNAKE_CASE : Any = router_ignore_padding_tokens
_SCREAMING_SNAKE_CASE : List[Any] = output_hidden_states
_SCREAMING_SNAKE_CASE : Union[str, Any] = output_attentions
_SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor
_SCREAMING_SNAKE_CASE : Dict = output_router_logits
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
super().__init__(
separator_token_id=__A , pad_token_id=__A , eos_token_id=__A , **__A , )
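The attribute_map above lets generic names proxy the model-specific fields, so config.hidden_size transparently reads and writes config.d_model. A small sketch of the same mechanism with an illustrative ToyConfig:

from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=1_280, **kwargs):
        self.d_model = d_model
        super().__init__(**kwargs)

cfg = ToyConfig()
assert cfg.hidden_size == cfg.d_model == 1_280
cfg.hidden_size = 512  # writes through to d_model
assert cfg.d_model == 512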
| 703 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( PretrainedConfig ):
"""simple docstring"""
a = "sew"
def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ):
"""simple docstring"""
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
_SCREAMING_SNAKE_CASE : Dict = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : str = conv_bias
_SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim)
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : str = squeeze_factor
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout
_SCREAMING_SNAKE_CASE : Tuple = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
_SCREAMING_SNAKE_CASE : str = final_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob
_SCREAMING_SNAKE_CASE : List[str] = mask_time_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob
_SCREAMING_SNAKE_CASE : int = mask_feature_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks
# ctc loss
_SCREAMING_SNAKE_CASE : int = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity
# sequence classification
_SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
| 635 | 0 |
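The property at the end of the class multiplies the convolutional strides together, which gives the feature encoder's overall downsampling factor. Evaluating it for the default strides above:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame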
"""simple docstring"""
def sum_digits(num: int )-> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum

def solution(max_n: int = 100 )-> int:
    # Numerators of the convergents of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]
    # via the recurrence h_i = a_i * h_(i-1) + h_(i-2).
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"{solution() = }")
| 704 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 635 | 0 |
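The _LazyModule indirection above defers importing the torch-backed submodule until one of its attributes is first requested. A minimal sketch of the same idea using only the standard library; LazyModule and the demo mapping are illustrative and far simpler than the real implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, item):
        # Import the providing module only now, on first access.
        module = importlib.import_module(self._name_to_module[item])
        return getattr(module, item)

lazy = LazyModule("demo", {"json": ["dumps", "loads"], "math": ["sqrt"]})
print(lazy.sqrt(9.0))  # math is imported here, not when LazyModule was built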
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCAmelCase_ = '''examples/'''
lowerCAmelCase_ = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
lowerCAmelCase_ = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
lowerCAmelCase_ = '''README.md'''
def update_version_in_file(fname , version , pattern )-> Tuple:
with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_SCREAMING_SNAKE_CASE : int = f.read()
_SCREAMING_SNAKE_CASE : Union[str, Any] = REPLACE_PATTERNS[pattern]
_SCREAMING_SNAKE_CASE : Union[str, Any] = replace.replace("""VERSION""" , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = re_pattern.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
def update_version_in_examples(__SCREAMING_SNAKE_CASE )-> List[Any]:
for folder, directories, fnames in os.walk(__SCREAMING_SNAKE_CASE ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , pattern="""examples""" )
def global_version_update(version , patch=False )-> str:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not patch:
update_version_in_examples(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_()-> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = """🤗 Transformers currently provides the following architectures"""
_SCREAMING_SNAKE_CASE : Optional[int] = """1. Want to contribute a new model?"""
with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_SCREAMING_SNAKE_CASE : List[Any] = f.readlines()
# Find the start of the list.
_SCREAMING_SNAKE_CASE : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_SCREAMING_SNAKE_CASE : List[str] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_SCREAMING_SNAKE_CASE : Optional[int] = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_()-> str:
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_SCREAMING_SNAKE_CASE : Optional[Any] = f.read()
_SCREAMING_SNAKE_CASE : int = REPLACE_PATTERNS["""init"""][0].search(__SCREAMING_SNAKE_CASE ).groups()[0]
return packaging.version.parse(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE=False )-> str:
_SCREAMING_SNAKE_CASE : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_SCREAMING_SNAKE_CASE : Optional[int] = default_version.base_version
elif patch:
_SCREAMING_SNAKE_CASE : int = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_SCREAMING_SNAKE_CASE : Tuple = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__SCREAMING_SNAKE_CASE ) == 0:
_SCREAMING_SNAKE_CASE : List[str] = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__SCREAMING_SNAKE_CASE , patch=__SCREAMING_SNAKE_CASE )
def lowerCamelCase_()-> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = get_version()
_SCREAMING_SNAKE_CASE : Tuple = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_SCREAMING_SNAKE_CASE : List[str] = current_version.base_version
# Check with the user we got that right.
_SCREAMING_SNAKE_CASE : List[Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__SCREAMING_SNAKE_CASE ) == 0:
_SCREAMING_SNAKE_CASE : Tuple = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__SCREAMING_SNAKE_CASE )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
lowerCAmelCase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 705 | """simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : int = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : List[Any] = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str:
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = parquet_path
elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
_SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for split in splits:
_SCREAMING_SNAKE_CASE : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
if split:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path}
else:
_SCREAMING_SNAKE_CASE : Optional[int] = """train"""
_SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path}
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]:
_SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" )
_SCREAMING_SNAKE_CASE : str = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int:
assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
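# For reference, the round trip exercised above boils down to this hedged
# sketch (the dataset and path are illustrative):
#   ds = Dataset.from_dict({"col_1": ["a", "b"]})
#   ParquetDatasetWriter(ds, "out.parquet").write()       # returns bytes written (> 0)
#   reloaded = ParquetDatasetReader("out.parquet").read()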
| 635 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase_ = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = '''UperNetConfig'''
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : str , _A : int , _A : int , _A : Union[int, Tuple[int, int]] , _A : Union[int, Tuple[int, int], str] = 0 , _A : bool = False , _A : Union[int, Tuple[int, int]] = 1 , ):
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Conv2d(
in_channels=a_ , out_channels=a_ , kernel_size=a_ , padding=a_ , bias=a_ , dilation=a_ , )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.BatchNorm2d(a_)
_SCREAMING_SNAKE_CASE : Tuple = nn.ReLU()
def _lowerCAmelCase ( self : Optional[Any] , _A : torch.Tensor):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv(a_)
_SCREAMING_SNAKE_CASE : Optional[int] = self.batch_norm(a_)
_SCREAMING_SNAKE_CASE : Dict = self.activation(a_)
return output
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , _A : int , _A : int , _A : int):
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = [
nn.AdaptiveAvgPool2d(a_),
UperNetConvModule(a_ , a_ , kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(a_) , a_)
def _lowerCAmelCase ( self : List[Any] , _A : torch.Tensor):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = input
for layer in self.layers:
_SCREAMING_SNAKE_CASE : str = layer(a_)
return hidden_state
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , _A : Tuple[int, ...] , _A : int , _A : int , _A : bool):
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = pool_scales
_SCREAMING_SNAKE_CASE : Optional[Any] = align_corners
_SCREAMING_SNAKE_CASE : List[Any] = in_channels
_SCREAMING_SNAKE_CASE : Dict = channels
_SCREAMING_SNAKE_CASE : Optional[int] = []
for i, pool_scale in enumerate(a_):
_SCREAMING_SNAKE_CASE : Optional[int] = UperNetPyramidPoolingBlock(pool_scale=a_ , in_channels=a_ , channels=a_)
self.blocks.append(a_)
self.add_module(str(a_) , a_)
def _lowerCAmelCase ( self : Union[str, Any] , _A : torch.Tensor):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = []
for ppm in self.blocks:
_SCREAMING_SNAKE_CASE : Any = ppm(a_)
_SCREAMING_SNAKE_CASE : Dict = nn.functional.interpolate(
a_ , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners)
ppm_outs.append(a_)
return ppm_outs
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , _A : Any , _A : Dict):
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = config
_SCREAMING_SNAKE_CASE : Tuple = config.pool_scales # e.g. (1, 2, 3, 6)
_SCREAMING_SNAKE_CASE : str = in_channels
_SCREAMING_SNAKE_CASE : Dict = config.hidden_size
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : List[Any] = nn.Conv2d(self.channels , config.num_labels , kernel_size=1)
# PSP Module
_SCREAMING_SNAKE_CASE : List[Any] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
_SCREAMING_SNAKE_CASE : int = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
_SCREAMING_SNAKE_CASE : str = nn.ModuleList()
_SCREAMING_SNAKE_CASE : Tuple = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
_SCREAMING_SNAKE_CASE : List[Any] = UperNetConvModule(a_ , self.channels , kernel_size=1)
_SCREAMING_SNAKE_CASE : Optional[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1)
self.lateral_convs.append(a_)
self.fpn_convs.append(a_)
_SCREAMING_SNAKE_CASE : str = UperNetConvModule(
len(self.in_channels) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
self.apply(self._init_weights)
def _lowerCAmelCase ( self : Optional[Any] , _A : List[Any]):
"""simple docstring"""
if isinstance(a_ , nn.Conv2d):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def _lowerCAmelCase ( self : List[Any] , _A : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = inputs[-1]
_SCREAMING_SNAKE_CASE : Tuple = [x]
psp_outs.extend(self.psp_modules(a_))
_SCREAMING_SNAKE_CASE : Tuple = torch.cat(a_ , dim=1)
_SCREAMING_SNAKE_CASE : Optional[Any] = self.bottleneck(a_)
return output
def _lowerCAmelCase ( self : Optional[Any] , _A : torch.Tensor):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(a_))
# build top-down path
_SCREAMING_SNAKE_CASE : List[str] = len(a_)
for i in range(used_backbone_levels - 1 , 0 , -1):
_SCREAMING_SNAKE_CASE : Dict = laterals[i - 1].shape[2:]
_SCREAMING_SNAKE_CASE : Tuple = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=a_ , mode="""bilinear""" , align_corners=self.align_corners)
# build outputs
_SCREAMING_SNAKE_CASE : Optional[int] = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1 , 0 , -1):
_SCREAMING_SNAKE_CASE : int = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners)
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(a_ , dim=1)
_SCREAMING_SNAKE_CASE : Any = self.fpn_bottleneck(a_)
_SCREAMING_SNAKE_CASE : Dict = self.classifier(a_)
return output
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , _A : int , _A : int = 2 , _A : int = 3 , _A : Union[int, Tuple[int, int]] = 1):
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE : List[Any] = config
_SCREAMING_SNAKE_CASE : Dict = config.auxiliary_in_channels
_SCREAMING_SNAKE_CASE : Dict = config.auxiliary_channels
_SCREAMING_SNAKE_CASE : int = config.auxiliary_num_convs
_SCREAMING_SNAKE_CASE : Any = config.auxiliary_concat_input
_SCREAMING_SNAKE_CASE : Union[str, Any] = in_index
_SCREAMING_SNAKE_CASE : int = (kernel_size // 2) * dilation
_SCREAMING_SNAKE_CASE : Tuple = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=a_ , padding=a_ , dilation=a_))
for i in range(self.num_convs - 1):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=a_ , padding=a_ , dilation=a_))
if self.num_convs == 0:
_SCREAMING_SNAKE_CASE : str = nn.Identity()
else:
_SCREAMING_SNAKE_CASE : List[Any] = nn.Sequential(*a_)
if self.concat_input:
_SCREAMING_SNAKE_CASE : Dict = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=a_ , padding=kernel_size // 2)
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv2d(self.channels , config.num_labels , kernel_size=1)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
self.apply(self._init_weights)
def _lowerCAmelCase ( self : Dict , _A : List[str]):
"""simple docstring"""
if isinstance(a_ , nn.Conv2d):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def _lowerCAmelCase ( self : List[Any] , _A : torch.Tensor):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = encoder_hidden_states[self.in_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.convs(a_)
if self.concat_input:
_SCREAMING_SNAKE_CASE : Any = self.conv_cat(torch.cat([hidden_states, output] , dim=1))
_SCREAMING_SNAKE_CASE : str = self.classifier(a_)
return output
class _snake_case ( __a ):
"""simple docstring"""
a = UperNetConfig
a = "pixel_values"
a = True
def _lowerCAmelCase ( self : Dict , _A : Optional[int]):
"""simple docstring"""
if isinstance(a_ , a_):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowerCAmelCase ( self : str):
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowerCAmelCase ( self : int , _A : Union[str, Any] , _A : str=False):
"""simple docstring"""
if isinstance(a_ , a_):
_SCREAMING_SNAKE_CASE : Union[str, Any] = value
lowerCAmelCase_ = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
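# Hedged usage sketch (assuming the upstream class name UperNetForSemanticSegmentation
# for the model defined below; the checkpoint and `image` are illustrative):
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch_size, num_labels, height, width)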
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , __a , )
class _snake_case ( __a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _A : Any):
"""simple docstring"""
super().__init__(a_)
_SCREAMING_SNAKE_CASE : Tuple = AutoBackbone.from_config(config.backbone_config)
# Semantic segmentation head(s)
_SCREAMING_SNAKE_CASE : int = UperNetHead(a_ , in_channels=self.backbone.channels)
_SCREAMING_SNAKE_CASE : Tuple = UperNetFCNHead(a_) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length"""))
@replace_return_docstrings(output_type=a_ , config_class=_CONFIG_FOR_DOC)
def _lowerCAmelCase ( self : Union[str, Any] , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
_SCREAMING_SNAKE_CASE : str = self.backbone.forward_with_filtered_kwargs(
a_ , output_hidden_states=a_ , output_attentions=a_)
_SCREAMING_SNAKE_CASE : Dict = outputs.feature_maps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.decode_head(a_)
_SCREAMING_SNAKE_CASE : int = nn.functional.interpolate(a_ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=a_)
_SCREAMING_SNAKE_CASE : Dict = None
if self.auxiliary_head is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.auxiliary_head(a_)
_SCREAMING_SNAKE_CASE : List[str] = nn.functional.interpolate(
a_ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=a_)
_SCREAMING_SNAKE_CASE : Any = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""")
else:
# compute weighted loss
_SCREAMING_SNAKE_CASE : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
_SCREAMING_SNAKE_CASE : Any = loss_fct(a_ , a_)
_SCREAMING_SNAKE_CASE : List[str] = loss_fct(a_ , a_)
_SCREAMING_SNAKE_CASE : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
_SCREAMING_SNAKE_CASE : List[str] = (logits,) + outputs[1:]
else:
_SCREAMING_SNAKE_CASE : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=a_ , logits=a_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 706 | """simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
if not isinstance(__SCREAMING_SNAKE_CASE , int ):
raise TypeError("""only integers accepted as input""" )
else:
num_str = str(abs(__SCREAMING_SNAKE_CASE ) )
num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
for index in range(len(num_str ) ):
num_transpositions[index].pop(index )
return max(
int("""""".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(__SCREAMING_SNAKE_CASE )-> bool:
n = str(__SCREAMING_SNAKE_CASE )
return n == n[::-1]
def solution(__SCREAMING_SNAKE_CASE = 1_000_000 )-> int:
total = 0
for i in range(1 , __SCREAMING_SNAKE_CASE ):
if is_palindrome(i ) and is_palindrome(bin(i ).split("""b""" )[1] ):
total += i
return total
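# Sanity check: below 10 the double-base palindromes are 1, 3, 5, 7 and 9
# (1, 11, 101, 111, 1001 in binary), so solution(10) == 25.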
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 707 | """simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : str = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : List[Any] = -1
_SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0])
_SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A)
_SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A)
thread.start()
_SCREAMING_SNAKE_CASE : Any = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :]
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""")
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A)
_SCREAMING_SNAKE_CASE : int = -1
_SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A)
model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n"
_SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Tuple = -1
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001)
_SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_A):
_SCREAMING_SNAKE_CASE : str = """"""
for new_text in streamer:
streamer_text += new_text
| 635 | 0 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( SchedulerCommonTest ):
"""simple docstring"""
a = (CMStochasticIterativeScheduler,)
a = 10
def _lowerCAmelCase ( self : Optional[Any] , **_A : List[str]):
"""simple docstring"""
config = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**_A)
return config
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
num_inference_steps = 1_0
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timestep_a = scheduler.timesteps[0]
timestep_b = scheduler.timesteps[1]
sample = self.dummy_sample
residual = 0.1 * sample
output_a = scheduler.step(residual , timestep_a , sample).prev_sample
output_b = scheduler.step(residual , timestep_b , sample).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_b.shape)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowercase_)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowercase_)
_SCREAMING_SNAKE_CASE : Optional[int] = 1
scheduler.set_timesteps(lowercase_)
_SCREAMING_SNAKE_CASE : Any = scheduler.timesteps
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
_SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowercase_):
# 1. scale model input
_SCREAMING_SNAKE_CASE : int = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
_SCREAMING_SNAKE_CASE : Optional[int] = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
_SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
_SCREAMING_SNAKE_CASE : str = pred_prev_sample
_SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(lowercase_))
_SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_9_2.7_6_1_4) < 1e-2
assert abs(result_mean.item() - 0.2_510) < 1e-3
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowercase_)
_SCREAMING_SNAKE_CASE : List[Any] = [1_0_6, 0]
scheduler.set_timesteps(timesteps=lowercase_)
_SCREAMING_SNAKE_CASE : str = scheduler.timesteps
_SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0)
_SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
_SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_SCREAMING_SNAKE_CASE : Any = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
_SCREAMING_SNAKE_CASE : List[Any] = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
_SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
_SCREAMING_SNAKE_CASE : List[str] = torch.sum(torch.abs(lowercase_))
_SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 3_4_7.6_3_5_7) < 1e-2
assert abs(result_mean.item() - 0.4_527) < 1e-3
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowercase_)
_SCREAMING_SNAKE_CASE : List[Any] = [3_9, 3_0, 1_2, 1_5, 0]
with self.assertRaises(lowercase_ , msg="""`timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=lowercase_)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowercase_)
_SCREAMING_SNAKE_CASE : Union[str, Any] = [3_9, 3_0, 1_2, 1, 0]
_SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase_)
with self.assertRaises(lowercase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : str = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowercase_)
_SCREAMING_SNAKE_CASE : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowercase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowercase_)
| 708 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _snake_case ( PipelineTool ):
"""simple docstring"""
a = "facebook/bart-large-mnli"
a = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a = "text_classifier"
a = AutoTokenizer
a = AutoModelForSequenceClassification
a = ["text", ["text"]]
a = ["text"]
def _lowerCAmelCase ( self : int):
"""simple docstring"""
super().setup()
_SCREAMING_SNAKE_CASE : Any = self.model.config
_SCREAMING_SNAKE_CASE : Any = -1
for idx, label in config.id2label.items():
if label.lower().startswith("""entail"""):
_SCREAMING_SNAKE_CASE : List[Any] = int(_A)
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")
def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = labels
return self.pre_processor(
[text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = outputs.logits
_SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
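# Hedged usage sketch of the tool above (PipelineTool.__call__ chains encode ->
# forward -> decode; the example text and labels are illustrative):
#   classifier = _snake_case()
#   classifier("This movie was great", ["positive", "negative"])  # -> "positive"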
| 635 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( ProcessorMixin ):
"""simple docstring"""
a = ["""image_processor""", """tokenizer"""]
a = """CLIPImageProcessor"""
a = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : Optional[Any] , image_processor : Union[str, Any]=None , tokenizer : int=None , **kwargs : int):
"""simple docstring"""
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , FutureWarning , )
feature_extractor = kwargs.pop("""feature_extractor""")
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(image_processor , tokenizer)
def __call__( self : Optional[int] , text : Optional[int]=None , images : Optional[Any]=None , return_tensors : Union[str, Any]=None , **kwargs : str):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
if images is not None:
image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
if text is not None and images is not None:
encoding["""pixel_values"""] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
def _lowerCAmelCase ( self : Union[str, Any] , *_A : Any , **_A : Union[str, Any]):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__)
def _lowerCAmelCase ( self : Tuple , *_A : int , **_A : int):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__)
@property
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
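# Hedged usage sketch (instances and inputs are illustrative): the processor
# routes text to the XLM-R tokenizer and images to the CLIP image processor.
#   processor = _snake_case(image_processor=image_processor, tokenizer=tokenizer)
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> input_ids / attention_mask plus pixel_values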
| 709 | """simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
_SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
model.to(_A)
from datasets import load_dataset
_SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""")
_SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""")
_SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A)
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Any = model(**_A)
_SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
_SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6))
self.assertEqual(logits.shape , _A)
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_['''feature_extraction_mobilenet_v2'''] = ['''MobileNetV2FeatureExtractor''']
lowerCAmelCase_['''image_processing_mobilenet_v2'''] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_['''modeling_mobilenet_v2'''] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetV2Config,
MobileNetV2OnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetV2ForImageClassification,
MobileNetV2ForSemanticSegmentation,
MobileNetV2Model,
MobileNetV2PreTrainedModel,
load_tf_weights_in_mobilenet_v2,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCAmelCase_, module_spec=__spec__)
| 710 | """simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig ):
"""simple docstring"""
a = "M-CLIP"
def __init__( self : Optional[Any] , transformerDimSize : int=1_0_2_4 , imageDimSize : int=7_6_8 , **_A : Optional[Any]):
"""simple docstring"""
self.transformerDimensions = transformerDimSize
self.numDims = imageDimSize
super().__init__(**_A)
class MultilingualCLIP ( PreTrainedModel ):
"""simple docstring"""
a = MCLIPConfig
def __init__( self : Dict , config : MCLIPConfig , *args : Any , **kwargs : Dict):
"""simple docstring"""
super().__init__(config , *args , **kwargs)
self.transformer = XLMRobertaModel(config)
self.LinearTransformation = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims)
def _lowerCAmelCase ( self : Union[str, Any] , input_ids , attention_mask):
"""simple docstring"""
embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask)[0]
embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(embs), embs
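# The forward pass above mean-pools the XLM-R token embeddings with the attention
# mask (so padding is excluded), then projects the pooled vector into the CLIP
# image-embedding space. Hedged shape sketch: input_ids (B, T) -> pooled
# (B, transformerDimensions) -> projected (B, numDims).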
| 635 | 0 |
from PIL import Image
def change_brightness(img , level )-> Image:
def brightness(c ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(brightness )
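# Note: 128 + level + (c - 128) simplifies to c + level, so e.g. level=100 maps a
# pixel value of 50 to 150; PIL's 8-bit modes clamp the result to the 0-255 range.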
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
bright_img = change_brightness(img, 100)
bright_img.save('''image_data/lena_brightness.png''', format='''png''')
| 711 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision )-> str:
if not isinstance(precision , int ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
getcontext().prec = precision
num_iterations = ceil(precision / 14 )
constant_term = 426_880 * Decimal(10_005 ).sqrt()
exponential_term = 1
linear_term = 13_591_409
partial_sum = Decimal(linear_term )
for k in range(1 , num_iterations ):
multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
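# The Chudnovsky series adds roughly 14 correct digits per iteration, which is
# why num_iterations is ceil(precision / 14). Example: pi(10) -> "3.14159265"
# (the trailing digit returned by Decimal is trimmed by the [:-1] above).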
if __name__ == "__main__":
lowerCAmelCase_ = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _snake_case :
"""simple docstring"""
def __init__( self : Any , _A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = num_of_nodes
_SCREAMING_SNAKE_CASE : list[list[int]] = []
_SCREAMING_SNAKE_CASE : dict[int, int] = {}
def _lowerCAmelCase ( self : Optional[int] , _A : int , _A : int , _A : int):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight])
def _lowerCAmelCase ( self : Tuple , _A : int):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def _lowerCAmelCase ( self : Tuple , _A : int):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
_SCREAMING_SNAKE_CASE : Optional[int] = self.find_component(UpperCamelCase__)
def _lowerCAmelCase ( self : str , _A : list[int] , _A : int , _A : int):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
_SCREAMING_SNAKE_CASE : int = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCamelCase__)
elif component_size[u_node] >= component_size[v_node]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.find_component(UpperCamelCase__)
component_size[u_node] += component_size[v_node]
self.set_component(UpperCamelCase__)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
_SCREAMING_SNAKE_CASE : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_SCREAMING_SNAKE_CASE : int = edge
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.m_component[u]
_SCREAMING_SNAKE_CASE : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCamelCase__ , UpperCamelCase__):
_SCREAMING_SNAKE_CASE : str = edge
_SCREAMING_SNAKE_CASE : List[str] = self.m_component[u]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
num_of_components -= 1
_SCREAMING_SNAKE_CASE : Union[str, Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def lowerCamelCase_()-> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
_SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
_SCREAMING_SNAKE_CASE : Optional[int] = 4
_SCREAMING_SNAKE_CASE : Any = True
# hparam_utils.py hparams
_SCREAMING_SNAKE_CASE : Any = 0.66_46_94
_SCREAMING_SNAKE_CASE : str = 0.20_79_51
_SCREAMING_SNAKE_CASE : str = 0.12_11_94
_SCREAMING_SNAKE_CASE : List[Any] = True
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13
_SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_SCREAMING_SNAKE_CASE : int = 4
_SCREAMING_SNAKE_CASE : Tuple = False
# hparam_utils.py hparams
_SCREAMING_SNAKE_CASE : Any = 36.45_19
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21
_SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[int] = True
_SCREAMING_SNAKE_CASE : Dict = 0.76_31_41
_SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
_SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
elif task == "MLM":
_SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
_SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
_SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
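# Example invocation (hedged; the script name and all paths are illustrative):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task SQA \
#     --tf_checkpoint_path ./tapas_sqa/model.ckpt \
#     --tapas_config_file ./tapas_sqa/config.json \
#     --pytorch_dump_path ./tapas_sqa_pytorch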
| 635 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 713 | """simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix )-> bool:
return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a , v )-> Any:
v_star = v.conjugate().T
v_star_dot = v_star.dot(a )
assert isinstance(v_star_dot , np.ndarray )
return (v_star_dot.dot(v )) / (v_star.dot(v ))
def lowerCamelCase_()-> None:
_SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
_SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] )
assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian."""
print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian."""
assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
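# Illustrative sketch (plain NumPy, not part of the original file): for a Hermitian
# matrix, the Rayleigh quotient of an eigenvector recovers the corresponding
# eigenvalue, which is the property the quotient computed above is built on.
import numpy as np
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]], dtype=float)
eigenvalues, eigenvectors = np.linalg.eigh(a)
v = eigenvectors[:, [0]]  # column eigenvector for the smallest eigenvalue
rq = (v.conjugate().T @ a @ v) / (v.conjugate().T @ v)
assert np.isclose(rq.item(), eigenvalues[0])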
| 635 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _snake_case ( UpperCAmelCase__ ):
"""simple docstring"""
a = 'blenderbot-small'
a = ['past_key_values']
a = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[Any] , _A : Dict=5_0_2_6_5 , _A : Dict=5_1_2 , _A : Any=8 , _A : str=2_0_4_8 , _A : Union[str, Any]=1_6 , _A : List[str]=8 , _A : str=2_0_4_8 , _A : Optional[int]=1_6 , _A : List[Any]=0.0 , _A : int=0.0 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : int="gelu" , _A : Union[str, Any]=5_1_2 , _A : Any=0.1 , _A : List[Any]=0.0 , _A : Union[str, Any]=0.0 , _A : List[Any]=0.02 , _A : Tuple=1 , _A : str=False , _A : str=0 , _A : int=1 , _A : Any=2 , _A : int=2 , **_A : Tuple , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = d_model
_SCREAMING_SNAKE_CASE = encoder_ffn_dim
_SCREAMING_SNAKE_CASE = encoder_layers
_SCREAMING_SNAKE_CASE = encoder_attention_heads
_SCREAMING_SNAKE_CASE = decoder_ffn_dim
_SCREAMING_SNAKE_CASE = decoder_layers
_SCREAMING_SNAKE_CASE = decoder_attention_heads
_SCREAMING_SNAKE_CASE = dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = activation_dropout
_SCREAMING_SNAKE_CASE = activation_function
_SCREAMING_SNAKE_CASE = init_std
_SCREAMING_SNAKE_CASE = encoder_layerdrop
_SCREAMING_SNAKE_CASE = decoder_layerdrop
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = encoder_layers
_SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , )
class _snake_case ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
])
if self.use_past:
_SCREAMING_SNAKE_CASE = {0: """batch"""}
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""}
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_A , direction="""inputs""")
elif self.task == "causal-lm":
# TODO: figure this case out.
_SCREAMING_SNAKE_CASE = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
])
if self.use_past:
_SCREAMING_SNAKE_CASE = self.num_layers
for i in range(_A):
_SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
_SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_SCREAMING_SNAKE_CASE = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
])
return common_inputs
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE = super().outputs
else:
_SCREAMING_SNAKE_CASE = super(_A , self).outputs
if self.use_past:
_SCREAMING_SNAKE_CASE = self.num_layers
for i in range(_A):
_SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
_SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _lowerCAmelCase ( self : List[str] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A)
# Generate decoder inputs
_SCREAMING_SNAKE_CASE = seq_length if not self.use_past else 1
_SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A)
_SCREAMING_SNAKE_CASE = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_SCREAMING_SNAKE_CASE = dict(**_A , **_A)
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
_SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
_SCREAMING_SNAKE_CASE = common_inputs["""decoder_input_ids"""].shape[1]
_SCREAMING_SNAKE_CASE = self.num_attention_heads
_SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_SCREAMING_SNAKE_CASE = decoder_seq_length + 3
_SCREAMING_SNAKE_CASE = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_A , _A)] , dim=1)
_SCREAMING_SNAKE_CASE = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_SCREAMING_SNAKE_CASE = self.num_layers
_SCREAMING_SNAKE_CASE = min(_A , _A)
_SCREAMING_SNAKE_CASE = max(_A , _A) - min_num_layers
_SCREAMING_SNAKE_CASE = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_A):
common_inputs["past_key_values"].append(
(
torch.zeros(_A),
torch.zeros(_A),
torch.zeros(_A),
torch.zeros(_A),
))
# TODO: test this.
_SCREAMING_SNAKE_CASE = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_A , _A):
common_inputs["past_key_values"].append((torch.zeros(_A), torch.zeros(_A)))
return common_inputs
def _lowerCAmelCase ( self : Any , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A)
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
_SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_SCREAMING_SNAKE_CASE = seqlen + 2
_SCREAMING_SNAKE_CASE = self.num_layers
_SCREAMING_SNAKE_CASE = self.num_attention_heads
_SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""].dtype
_SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_A , _A , dtype=_A)] , dim=1)
_SCREAMING_SNAKE_CASE = [
(torch.zeros(_A), torch.zeros(_A)) for _ in range(_A)
]
return common_inputs
def _lowerCAmelCase ( self : Any , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add(_A)
_SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A)
# Generate dummy inputs according to compute batch and sequence
_SCREAMING_SNAKE_CASE = [""" """.join([tokenizer.unk_token]) * seq_length] * batch_size
_SCREAMING_SNAKE_CASE = dict(tokenizer(_A , return_tensors=_A))
return common_inputs
def _lowerCAmelCase ( self : Tuple , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A)
elif self.task == "causal-lm":
_SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_causal_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A)
else:
_SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A)
return common_inputs
def _lowerCAmelCase ( self : Union[str, Any] , _A : Dict , _A : Any , _A : Dict , _A : List[Any]):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(_A , _A , _A , _A)
else:
_SCREAMING_SNAKE_CASE = super(_A , self)._flatten_past_key_values_(
_A , _A , _A , _A)
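# Illustrative sketch (hypothetical sizes): each cached layer built above holds a
# key/value tensor pair shaped (batch, num_heads, past_seq_len, d_model // num_heads).
import torch
batch, num_heads, past_len, d_model, num_layers = 2, 16, 7, 512, 8
shape = (batch, num_heads, past_len, d_model // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 7, 32])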
| 714 | """simple docstring"""
from __future__ import annotations
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
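# Illustrative check (silicon value assumed for n_i): the formulas above encode the
# mass-action law n * p = n_i ** 2, so two known concentrations fix the third.
n_i = 1.5e10  # intrinsic carrier concentration, per cm^3 (assumed)
n = 1.0e16  # electron concentration
p = n_i**2 / n  # hole concentration implied by the law
assert abs(n * p - n_i**2) <= 1e-6 * n_i**2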
| 635 | 0 |
"""simple docstring"""
from ....utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _snake_case ( lowercase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _A : Any , _A : Tuple=None , _A : int=2_0_4_8):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = config.__dict__
_SCREAMING_SNAKE_CASE : Optional[Any] = modal_hidden_size
if num_labels:
_SCREAMING_SNAKE_CASE : Tuple = num_labels
| 715 | """simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str:
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict(
{
"""train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ),
"""validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_SCREAMING_SNAKE_CASE : str = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_SCREAMING_SNAKE_CASE : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
_SCREAMING_SNAKE_CASE : Any = 8
else:
_SCREAMING_SNAKE_CASE : Optional[int] = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader, test_dataloader
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
# New Code #
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
# Download the dataset
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE : Tuple = config["""lr"""]
_SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] )
_SCREAMING_SNAKE_CASE : int = int(config["""seed"""] )
_SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] )
_SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_SCREAMING_SNAKE_CASE : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE
_SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__SCREAMING_SNAKE_CASE )
# New Code #
# Create our folds:
_SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_SCREAMING_SNAKE_CASE : Optional[Any] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
# Instantiate the model (we build the model here so that the seed also controls the new weight initialization)
_SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
_SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = outputs.loss
_SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE )
# New Code #
# We also run predictions on the test set at the very end
_SCREAMING_SNAKE_CASE : str = []
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
_SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_()-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" )
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
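# Illustrative sketch (random toy logits): the fold ensembling above is soft voting,
# i.e. stack the per-fold test logits, average them, then take the argmax.
import torch
num_folds, num_examples, num_classes = 3, 4, 2
fold_logits = [torch.randn(num_examples, num_classes) for _ in range(num_folds)]
averaged = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)
predictions = averaged.argmax(dim=-1)
print(predictions.shape)  # torch.Size([4])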
| 635 | 0 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCAmelCase_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _snake_case ( __snake_case ):
"""simple docstring"""
def __init__( self : Tuple , *_A : Dict , _A : Dict=None , _A : Tuple=None , _A : Dict=None , **_A : Any):
"""simple docstring"""
super().__init__(*A_ , **A_)
_SCREAMING_SNAKE_CASE : Tuple = eval_examples
_SCREAMING_SNAKE_CASE : Dict = post_process_function
_SCREAMING_SNAKE_CASE : Union[str, Any] = quant_trainer_args
_SCREAMING_SNAKE_CASE : Any = 1_2_8 # default number of calibration samples
def _lowerCAmelCase ( self : Dict , _A : Union[str, Any]=None):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""")
_SCREAMING_SNAKE_CASE : Any = calib_dataset if calib_dataset is not None else self.calib_dataset
_SCREAMING_SNAKE_CASE : Dict = self._remove_unused_columns(A_ , description="""Calibration""")
return DataLoader(
A_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A_ , )
def _lowerCAmelCase ( self : Dict , _A : Any=None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE : Dict = self.get_calib_dataloader(A_)
_SCREAMING_SNAKE_CASE : Optional[int] = self.model
quant_trainer.configure_model(A_ , self.quant_trainer_args , calib=A_)
model.eval()
quant_trainer.enable_calibration(A_)
logger.info("""***** Running calibration *****""")
logger.info(f""" Num examples = {self.calib_num}""")
logger.info(f""" Batch size = {calib_dataloader.batch_size}""")
for step, inputs in enumerate(A_):
# Prediction step
_SCREAMING_SNAKE_CASE : List[Any] = self.prediction_step(A_ , A_ , prediction_loss_only=A_)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A_ , self.quant_trainer_args)
_SCREAMING_SNAKE_CASE : str = model
def _lowerCAmelCase ( self : Optional[Any] , _A : Optional[int]=None , _A : Optional[Any]=None , _A : Tuple=None , _A : List[Any] = "eval"):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE : Any = self.get_eval_dataloader(A_)
_SCREAMING_SNAKE_CASE : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE : List[Any] = self.compute_metrics
_SCREAMING_SNAKE_CASE : Dict = None
_SCREAMING_SNAKE_CASE : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE : int = eval_loop(
A_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
finally:
_SCREAMING_SNAKE_CASE : Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE : List[str] = self.post_process_function(A_ , A_ , output.predictions)
_SCREAMING_SNAKE_CASE : Dict = self.compute_metrics(A_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"""{metric_key_prefix}_"""):
_SCREAMING_SNAKE_CASE : Optional[int] = metrics.pop(A_)
self.log(A_)
else:
_SCREAMING_SNAKE_CASE : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_SCREAMING_SNAKE_CASE : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_)
return metrics
def _lowerCAmelCase ( self : Tuple , _A : Any , _A : str , _A : List[Any]=None , _A : Optional[int] = "test"):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_test_dataloader(A_)
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE : int = self.compute_metrics
_SCREAMING_SNAKE_CASE : Tuple = None
_SCREAMING_SNAKE_CASE : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE : Any = eval_loop(
A_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
finally:
_SCREAMING_SNAKE_CASE : Dict = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE : List[str] = self.post_process_function(A_ , A_ , output.predictions , """predict""")
_SCREAMING_SNAKE_CASE : Optional[Any] = self.compute_metrics(A_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"""{metric_key_prefix}_"""):
_SCREAMING_SNAKE_CASE : Tuple = metrics.pop(A_)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_)
def _lowerCAmelCase ( self : List[Any] , _A : Any="./"):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.eval_dataset
_SCREAMING_SNAKE_CASE : Dict = self.get_eval_dataloader(A_)
_SCREAMING_SNAKE_CASE : List[str] = next(iter(A_))
# saving device - to make it consistent
_SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
# convert to tuple
_SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(v.to(A_) for k, v in batch.items())
logger.info("""Converting model to be onnx compatible""")
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : int = self.model.to(A_)
model.eval()
model.float()
_SCREAMING_SNAKE_CASE : Tuple = model.module if hasattr(A_ , """module""") else model
quant_trainer.configure_model(A_ , self.quant_trainer_args)
_SCREAMING_SNAKE_CASE : int = os.path.join(A_ , """model.onnx""")
logger.info(f"""exporting model to {output_model_file}""")
_SCREAMING_SNAKE_CASE : List[Any] = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
A_ , A_ , A_ , export_params=A_ , opset_version=1_3 , do_constant_folding=A_ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=A_ , )
logger.info("""onnx export finished""")
| 716 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
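# Illustrative usage sketch (checkpoint id assumed): with the lazy structure above,
# nothing torch-dependent is imported until one of these names is actually touched.
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")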
| 635 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = []
for part_id in partition_order:
_SCREAMING_SNAKE_CASE : str = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(__SCREAMING_SNAKE_CASE ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_()-> Dict:
_SCREAMING_SNAKE_CASE : Any = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_SCREAMING_SNAKE_CASE : List[str] = spark.range(100 ).repartition(1 )
_SCREAMING_SNAKE_CASE : Any = Spark(__SCREAMING_SNAKE_CASE )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_()-> int:
_SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_SCREAMING_SNAKE_CASE : Optional[Any] = spark.range(10 ).repartition(2 )
_SCREAMING_SNAKE_CASE : Tuple = [1, 0]
_SCREAMING_SNAKE_CASE : str = _generate_iterable_examples(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Reverse the partitions.
_SCREAMING_SNAKE_CASE : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_()-> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_SCREAMING_SNAKE_CASE : List[Any] = spark.range(10 ).repartition(1 )
_SCREAMING_SNAKE_CASE : str = SparkExamplesIterable(__SCREAMING_SNAKE_CASE )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_()-> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_SCREAMING_SNAKE_CASE : Tuple = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
_SCREAMING_SNAKE_CASE : Optional[int] = lambda __SCREAMING_SNAKE_CASE : x.reverse()
_SCREAMING_SNAKE_CASE : str = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , [2, 1, 0] )
_SCREAMING_SNAKE_CASE : str = SparkExamplesIterable(__SCREAMING_SNAKE_CASE ).shuffle_data_sources(__SCREAMING_SNAKE_CASE )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_()-> Tuple:
_SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_SCREAMING_SNAKE_CASE : Optional[Any] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_SCREAMING_SNAKE_CASE : Tuple = SparkExamplesIterable(__SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
_SCREAMING_SNAKE_CASE : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , [0, 2] )
for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_SCREAMING_SNAKE_CASE : Union[str, Any] = SparkExamplesIterable(__SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
_SCREAMING_SNAKE_CASE : Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , [1, 3] )
for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_()-> int:
_SCREAMING_SNAKE_CASE : Any = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_SCREAMING_SNAKE_CASE : Any = spark.range(100 ).repartition(1 )
_SCREAMING_SNAKE_CASE : int = Spark(__SCREAMING_SNAKE_CASE )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
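# Illustrative sketch of the arithmetic these tests rely on: int64 ids are 8 bytes,
# so 100 rows with max_shard_size=16 yields 100 / 2 = 50 partitions, while
# max_shard_size=1 caps out at one row per partition (100 partitions).
row_bytes, num_rows, max_shard_size = 8, 100, 16
rows_per_shard = max(1, max_shard_size // row_bytes)
num_partitions = -(-num_rows // rows_per_shard)  # ceiling division
print(num_partitions)  # 50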
| 717 | """simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class _snake_case :
"""simple docstring"""
def __init__( self : int , _A : List[Any] , _A : int , _A : int):
"""simple docstring"""
if dst_width <= 0 or dst_height <= 0:
raise ValueError("""Destination width/height should be > 0""")
_SCREAMING_SNAKE_CASE : str = img
_SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1]
_SCREAMING_SNAKE_CASE : Tuple = img.shape[0]
_SCREAMING_SNAKE_CASE : Any = dst_width
_SCREAMING_SNAKE_CASE : Any = dst_height
_SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w
_SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h
_SCREAMING_SNAKE_CASE : Optional[Any] = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5
)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
for i in range(self.dst_h):
for j in range(self.dst_w):
_SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)]
def _lowerCAmelCase ( self : int , _A : int):
"""simple docstring"""
return int(self.ratio_x * x)
def _lowerCAmelCase ( self : str , _A : int):
"""simple docstring"""
return int(self.ratio_y * y)
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ = 800, 600
lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1)
lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
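# Illustrative pure-NumPy sketch of the same nearest-neighbour mapping, using index
# arrays instead of the per-pixel Python loop above.
import numpy as np
src = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)
dst_h2, dst_w2 = 4, 4
ys = (np.arange(dst_h2) * (src.shape[0] / dst_h2)).astype(int)
xs = (np.arange(dst_w2) * (src.shape[1] / dst_w2)).astype(int)
resized = src[ys[:, None], xs[None, :]]
print(resized.shape)  # (4, 4, 3)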
| 635 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str:
_SCREAMING_SNAKE_CASE : List[Any] = nn.functional.normalize(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.functional.normalize(__lowerCAmelCase )
return torch.mm(__lowerCAmelCase , normalized_text_embeds.t() )
class _snake_case ( UpperCAmelCase__ ):
"""simple docstring"""
a = CLIPConfig
a = ["CLIPEncoderLayer"]
def __init__( self : str , _A : CLIPConfig):
"""simple docstring"""
super().__init__(lowerCamelCase__)
_SCREAMING_SNAKE_CASE : Optional[Any] = CLIPVisionModel(config.vision_config)
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCamelCase__)
_SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.ones(1_7 , config.projection_dim) , requires_grad=lowerCamelCase__)
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(3 , config.projection_dim) , requires_grad=lowerCamelCase__)
_SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.ones(1_7) , requires_grad=lowerCamelCase__)
_SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(3) , requires_grad=lowerCamelCase__)
@torch.no_grad()
def _lowerCAmelCase ( self : Union[str, Any] , _A : List[str] , _A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vision_model(lowerCamelCase__)[1] # pooled_output
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.visual_projection(lowerCamelCase__)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_SCREAMING_SNAKE_CASE : List[Any] = cosine_distance(lowerCamelCase__ , self.special_care_embeds).cpu().float().numpy()
_SCREAMING_SNAKE_CASE : str = cosine_distance(lowerCamelCase__ , self.concept_embeds).cpu().float().numpy()
_SCREAMING_SNAKE_CASE : str = []
_SCREAMING_SNAKE_CASE : Any = image_embeds.shape[0]
for i in range(lowerCamelCase__):
_SCREAMING_SNAKE_CASE : List[Any] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_SCREAMING_SNAKE_CASE : List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0])):
_SCREAMING_SNAKE_CASE : Dict = special_cos_dist[i][concept_idx]
_SCREAMING_SNAKE_CASE : str = self.special_care_embeds_weights[concept_idx].item()
_SCREAMING_SNAKE_CASE : List[str] = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]})
_SCREAMING_SNAKE_CASE : Tuple = 0.01
for concept_idx in range(len(cos_dist[0])):
_SCREAMING_SNAKE_CASE : Union[str, Any] = cos_dist[i][concept_idx]
_SCREAMING_SNAKE_CASE : int = self.concept_embeds_weights[concept_idx].item()
_SCREAMING_SNAKE_CASE : List[Any] = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCamelCase__)
result.append(lowerCamelCase__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = [len(res["""bad_concepts"""]) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowerCAmelCase ( self : Optional[int] , _A : torch.FloatTensor , _A : torch.FloatTensor):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.vision_model(lowerCamelCase__)[1] # pooled_output
_SCREAMING_SNAKE_CASE : Optional[Any] = self.visual_projection(lowerCamelCase__)
_SCREAMING_SNAKE_CASE : Any = cosine_distance(lowerCamelCase__ , self.special_care_embeds)
_SCREAMING_SNAKE_CASE : Optional[int] = cosine_distance(lowerCamelCase__ , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
_SCREAMING_SNAKE_CASE : List[str] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_SCREAMING_SNAKE_CASE : int = torch.any(special_scores > 0 , dim=1)
_SCREAMING_SNAKE_CASE : Dict = special_care * 0.01
_SCREAMING_SNAKE_CASE : Union[str, Any] = special_adjustment.unsqueeze(1).expand(-1 , cos_dist.shape[1])
_SCREAMING_SNAKE_CASE : str = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_SCREAMING_SNAKE_CASE : List[str] = torch.any(concept_scores > 0 , dim=1)
return images, has_nsfw_concepts
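# Illustrative sketch: cosine_distance above is the cosine similarity between every
# image embedding and every concept embedding, computed via normalized dot products.
import torch
img_embeds = torch.nn.functional.normalize(torch.randn(2, 8))
concept_embeds = torch.nn.functional.normalize(torch.randn(5, 8))
similarity = torch.mm(img_embeds, concept_embeds.t())
print(similarity.shape)  # torch.Size([2, 5]); each value lies in [-1, 1]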
| 718 | """simple docstring"""
import argparse
from collections import defaultdict
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int:
_SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines()
_SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}("""
_SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}("""
_SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}"""
_SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}"""
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
_SCREAMING_SNAKE_CASE : Dict = []
for line in lines:
if line.startswith(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = True
elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = True
elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )):
_SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_SCREAMING_SNAKE_CASE : int = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_SCREAMING_SNAKE_CASE : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_SCREAMING_SNAKE_CASE : Optional[int] = False
else:
new_lines.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as f:
for line in new_lines:
f.write(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]:
if fail is not None:
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()}
else:
_SCREAMING_SNAKE_CASE : str = None
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
_SCREAMING_SNAKE_CASE : str = f.readlines()
_SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE )
for line in correct_lines:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
lowerCAmelCase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
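# Illustrative sketch (hypothetical entries): each line of --correct_filename is
# expected to read "file;class;test;correct_line", matching the split(";") above.
sample = "tests/test_foo.py;FooTester;test_bar;self.assertEqual(out, expected)"
file, class_name, test_name, correct_line = sample.split(";")
print(file, class_name, test_name)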
| 635 | 0 |
"""simple docstring"""
from math import factorial, radians
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 18 , __SCREAMING_SNAKE_CASE = 10 )-> Any:
_SCREAMING_SNAKE_CASE : Dict = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
_SCREAMING_SNAKE_CASE : Optional[int] = radians(lowercase__ )
_SCREAMING_SNAKE_CASE : List[str] = angle_in_radians
_SCREAMING_SNAKE_CASE : Dict = 3
_SCREAMING_SNAKE_CASE : Optional[int] = -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
_SCREAMING_SNAKE_CASE : Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
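# Illustrative check (assuming the function above sums the Maclaurin series for
# sine): re-derive the series directly and compare against math.sin.
from math import factorial, radians, sin
x = radians(30.0)
approx = sum((-1) ** n * x ** (2 * n + 1) / factorial(2 * n + 1) for n in range(9))
assert abs(approx - sin(x)) < 1e-9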
| 719 | """simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase_ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*"""
_SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
_SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
_SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2
_SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_SCREAMING_SNAKE_CASE : Dict = value
_SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3
_SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim]
_SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2]
_SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :]
_SCREAMING_SNAKE_CASE : Dict = query_layer
_SCREAMING_SNAKE_CASE : List[Any] = key_layer
_SCREAMING_SNAKE_CASE : Dict = value_layer
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = value
return model_state_dict
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
_SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict()
_SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = ClapConfig()
_SCREAMING_SNAKE_CASE : Tuple = enable_fusion
_SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
lowerCAmelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
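# Illustrative sketch of the qkv split above: a fused projection of size 3 * d is
# sliced into equal query/key/value chunks along dimension 0.
import torch
d = 4
mixed_qkv = torch.arange(3 * d, dtype=torch.float32)
query, key, value = mixed_qkv[:d], mixed_qkv[d : 2 * d], mixed_qkv[2 * d :]
assert query.numel() == key.numel() == value.numel() == d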
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> None:
create_state_space_tree(UpperCamelCase__ , [] , 0 )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> None:
if index == len(UpperCamelCase__ ):
print(UpperCamelCase__ )
return
create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
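# Illustrative check: the include/exclude branching above enumerates all 2 ** n
# subsequences of an n-element sequence.
from itertools import combinations
items = [3, 1, 2, 4]
subsequences = [list(c) for r in range(len(items) + 1) for c in combinations(items, r)]
assert len(subsequences) == 2 ** len(items)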
| 720 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , )
assert hasattr(self , """env""")
def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]):
"""simple docstring"""
TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
_SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_SCREAMING_SNAKE_CASE : int = (
Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
| 635 | 0 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _snake_case ( __snake_case , __snake_case ):
"""simple docstring"""
a = "pixel_values"
a = False
a = TimmBackboneConfig
def __init__( self : Optional[Any] , _A : Dict , **_A : Any):
"""simple docstring"""
requires_backends(self , """timm""")
super().__init__(UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(UpperCAmelCase__ , """out_features""") and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""")
_SCREAMING_SNAKE_CASE : Optional[Any] = getattr(UpperCAmelCase__ , """use_pretrained_backbone""" , UpperCAmelCase__)
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""")
# We just take the final layer by default. This matches the default for the transformers models.
_SCREAMING_SNAKE_CASE : Optional[Any] = config.out_indices if getattr(UpperCAmelCase__ , """out_indices""" , UpperCAmelCase__) is not None else (-1,)
_SCREAMING_SNAKE_CASE : Union[str, Any] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase__ , **UpperCAmelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_SCREAMING_SNAKE_CASE : str = self._backbone.return_layers
_SCREAMING_SNAKE_CASE : str = {layer["""module"""]: str(UpperCAmelCase__) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(UpperCAmelCase__)
@classmethod
def _lowerCAmelCase ( cls : List[str] , _A : Optional[int] , *_A : Union[str, Any] , **_A : List[Any]):
"""simple docstring"""
requires_backends(cls , ["""vision""", """timm"""])
from ...models.timm_backbone import TimmBackboneConfig
_SCREAMING_SNAKE_CASE : Dict = kwargs.pop("""config""" , TimmBackboneConfig())
_SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase__)
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""")
_SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""num_channels""" , config.num_channels)
_SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""features_only""" , config.features_only)
_SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone)
_SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""out_indices""" , config.out_indices)
_SCREAMING_SNAKE_CASE : Optional[int] = TimmBackboneConfig(
backbone=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , features_only=UpperCAmelCase__ , use_pretrained_backbone=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , )
return super()._from_config(UpperCAmelCase__ , **UpperCAmelCase__)
def _lowerCAmelCase ( self : str , _A : int):
"""simple docstring"""
pass
def _lowerCAmelCase ( self : List[Any] , _A : str , _A : int=None , _A : Union[str, Any]=None , _A : Any=None , **_A : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_SCREAMING_SNAKE_CASE : Optional[Any] = self._all_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : List[str] = self._return_layers
_SCREAMING_SNAKE_CASE : List[str] = tuple(hidden_states[i] for i in self.out_indices)
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : List[str] = None
_SCREAMING_SNAKE_CASE : Any = tuple(UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : Optional[Any] = tuple(UpperCAmelCase__) if hidden_states is not None else None
if not return_dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = (feature_maps,)
if output_hidden_states:
_SCREAMING_SNAKE_CASE : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ , attentions=UpperCAmelCase__)
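# Minimal usage sketch for the backbone class above (an illustration, not part of the
# original file: it assumes the public `transformers` and `timm` packages and uses the
# un-mangled names TimmBackbone / TimmBackboneConfig that this snippet corresponds to).
import torch
from transformers import TimmBackbone, TimmBackboneConfig
_sketch_config = TimmBackboneConfig(backbone="""resnet18""" , use_pretrained_backbone=False , out_indices=(1, 2, 3))
_sketch_backbone = TimmBackbone(_sketch_config)
_sketch_features = _sketch_backbone(torch.randn(1 , 3 , 2_2_4 , 2_2_4)).feature_maps # one feature map per requested stage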
| 721 | """simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
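# e.g. """facebook/rag-token-nq""" -> """rag_token""", """facebook/rag-sequence-nq""" -> """rag_sequence""",
# """facebook/bart-large""" -> """bart"""; any other name falls through to None.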
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any:
return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths )
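# Self-contained sketch of the max-over-references reduction above (restated locally
# because the surrounding names are mangled in this dump): a prediction scores as well
# as its best match against any of the gold answers.
def _sketch_max_over_ground_truths(metric_fn , prediction , ground_truths):
    return max(metric_fn(prediction , gt) for gt in ground_truths)
assert _sketch_max_over_ground_truths(lambda p , g: int(p.lower() == g.lower()) , """Paris""" , ["""Lyon""", """paris"""]) == 1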
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]:
_SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Dict = []
if args.gold_data_mode == "qa":
_SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE )
for answer_list in data[1]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE )
answers.append(__SCREAMING_SNAKE_CASE )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references]
_SCREAMING_SNAKE_CASE : Optional[int] = 0
for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
total += 1
em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = args.k
_SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
def strip_title(__SCREAMING_SNAKE_CASE ):
if title.startswith("""\"""" ):
_SCREAMING_SNAKE_CASE : Optional[int] = title[1:]
if title.endswith("""\"""" ):
_SCREAMING_SNAKE_CASE : str = title[:-1]
return title
_SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device )
_SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0]
_SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever(
__SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
for docs in all_docs:
_SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) )
return provenance_strings
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]:
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
if args.print_predictions:
for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
return answers
def lowerCamelCase_()-> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
_SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if args.model_type is None:
_SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
_SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name
if args.index_path is not None:
_SCREAMING_SNAKE_CASE : Any = args.index_path
else:
_SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration
_SCREAMING_SNAKE_CASE : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
_SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
_SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
_SCREAMING_SNAKE_CASE : str = []
for line in tqdm(__SCREAMING_SNAKE_CASE ):
questions.append(line.strip() )
if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size:
_SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" )
preds_file.flush()
_SCREAMING_SNAKE_CASE : Any = []
if len(__SCREAMING_SNAKE_CASE ) > 0:
_SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
preds_file.flush()
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCAmelCase_ = get_args()
main(args)
| 635 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case ( __a ):
"""simple docstring"""
def _lowerCAmelCase ( self : Any , _A : Any):
"""simple docstring"""
with open(A__ , encoding="""utf-8""") as input_file:
_SCREAMING_SNAKE_CASE : List[Any] = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""")
_SCREAMING_SNAKE_CASE : Dict = input_file.read()
_SCREAMING_SNAKE_CASE : Optional[Any] = regexp.search(A__)
return match
def _lowerCAmelCase ( self : List[str] , _A : List[Any]):
"""simple docstring"""
with open(A__ , encoding="""utf-8""") as input_file:
_SCREAMING_SNAKE_CASE : str = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL)
_SCREAMING_SNAKE_CASE : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_SCREAMING_SNAKE_CASE : List[Any] = regexp.finditer(A__)
_SCREAMING_SNAKE_CASE : List[Any] = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = Path("""./datasets""")
_SCREAMING_SNAKE_CASE : Any = list(dataset_paths.absolute().glob("""**/*.py"""))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A__)):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = Path("""./datasets""")
_SCREAMING_SNAKE_CASE : Optional[Any] = list(dataset_paths.absolute().glob("""**/*.py"""))
for dataset in dataset_files:
if self._no_print_statements(str(A__)):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""") | 700 | """simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]:
set_seed(3 )
# generate train_data and objective_set
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
_SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE )
print("""computing perplexity on objective set""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item()
print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]:
set_seed(42 )
# Load pre-trained model
_SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE )
# Train secondary learner
_SCREAMING_SNAKE_CASE : Any = train_secondary_learner(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(__SCREAMING_SNAKE_CASE )
secondary_learner.eval()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : int = []
# Compute the performance of the transformer model at the beginning
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
for epoch in range(int(__SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(__SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 )
_SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = True
if secondary_learner is not None:
_SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward(
torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
_SCREAMING_SNAKE_CASE : List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
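# Restating the filtering rule implemented in the loop above: a sampled context is
# backpropagated only while the secondary learner's predicted information gain stays
# above the threshold, which drops from its initial value (1.0 by default) to -1 once
# global_step reaches 10, i.e. the filter loosens as training progresses.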
def lowerCamelCase_()-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_SCREAMING_SNAKE_CASE : int = training_secondary_learner(
__SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 635 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = 'longformer'
def __init__( self : Optional[int] , _A : Union[List[int], int] = 5_1_2 , _A : int = 2 , _A : int = 1 , _A : int = 0 , _A : int = 2 , _A : int = 3_0_5_2_2 , _A : int = 7_6_8 , _A : int = 1_2 , _A : int = 1_2 , _A : int = 3_0_7_2 , _A : str = "gelu" , _A : float = 0.1 , _A : float = 0.1 , _A : int = 5_1_2 , _A : int = 2 , _A : float = 0.02 , _A : float = 1e-12 , _A : bool = False , **_A : Optional[int] , ):
"""simple docstring"""
super().__init__(pad_token_id=_A , **_A)
_SCREAMING_SNAKE_CASE : Any = attention_window
_SCREAMING_SNAKE_CASE : str = sep_token_id
_SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
_SCREAMING_SNAKE_CASE : Any = eos_token_id
_SCREAMING_SNAKE_CASE : int = vocab_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Tuple = hidden_act
_SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Tuple = initializer_range
_SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[Any] = onnx_export
class _snake_case ( __snake_case ):
"""simple docstring"""
def __init__( self : Tuple , _A : "PretrainedConfig" , _A : str = "default" , _A : "List[PatchingSpec]" = None):
"""simple docstring"""
super().__init__(_A , _A , _A)
_SCREAMING_SNAKE_CASE : Optional[int] = True
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_SCREAMING_SNAKE_CASE : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
])
@property
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = super().outputs
if self.task == "default":
_SCREAMING_SNAKE_CASE : Optional[Any] = {0: """batch"""}
return outputs
@property
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
return 1e-4
@property
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return max(super().default_onnx_opset , 1_4)
def _lowerCAmelCase ( self : Any , _A : "PreTrainedTokenizerBase" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(
preprocessor=_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_SCREAMING_SNAKE_CASE : str = torch.zeros_like(inputs["""input_ids"""])
# make every second token global
_SCREAMING_SNAKE_CASE : str = 1
return inputs
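# Standalone sketch of the global attention pattern the override above installs on the
# dummy inputs: start from zeros, then mark every second position as global (batch size
# 2 and sequence length 8 are illustrative choices, not values from the original code).
import torch
_sketch_mask = torch.zeros((2 , 8) , dtype=torch.long)
_sketch_mask[:, ::2] = 1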
| 701 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( __snake_case ):
"""simple docstring"""
a = ["image_processor", "tokenizer"]
a = "ChineseCLIPImageProcessor"
a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
_SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""")
_SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_A , _A)
_SCREAMING_SNAKE_CASE : Dict = self.image_processor
def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A)
if images is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A)
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A) , tensor_type=_A)
def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A)
def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any):
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A)
@property
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , )
return self.image_processor_class
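# Minimal usage sketch for the processor above (an illustration; it assumes the public
# OFA-Sys/chinese-clip-vit-base-patch16 checkpoint and the un-mangled class name
# ChineseCLIPProcessor that this snippet corresponds to).
from PIL import Image
from transformers import ChineseCLIPProcessor
_sketch_processor = ChineseCLIPProcessor.from_pretrained("""OFA-Sys/chinese-clip-vit-base-patch16""")
_sketch_inputs = _sketch_processor(text=["""一只猫"""] , images=Image.new("""RGB""" , (2_2_4, 2_2_4)) , return_tensors="""pt""")
# _sketch_inputs carries input_ids / attention_mask from the tokenizer plus pixel_values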
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ : Tuple = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : str = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : int = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Any = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
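# Stripped-down sketch of the lazy-module pattern used above: the real import is
# deferred until the first attribute access (this toy class is illustrative only and
# far simpler than the actual _LazyModule).
import importlib
class _LazySketch:
    def __init__(self , name):
        self._name = name
        self._module = None
    def __getattr__(self , attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module , attr)
_lazy_json = _LazySketch("""json""") # the json module is only imported on first use
assert _lazy_json.dumps({"""a""": 1}) == """{"a": 1}"""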
| 702 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ['''model.decoder.embed_positions.weights''']
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]:
if "emb" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]:
_SCREAMING_SNAKE_CASE : str = list(state_dict.keys() )
_SCREAMING_SNAKE_CASE : Tuple = {}
for key in keys:
_SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
_SCREAMING_SNAKE_CASE : str = val[:hidden_size, :]
_SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_SCREAMING_SNAKE_CASE : int = val
else:
_SCREAMING_SNAKE_CASE : Dict = val
return state_dict, enc_dec_proj_state_dict
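# Standalone sketch of the fused-QKV split performed above: a single (3 * hidden, hidden)
# in_proj weight is cut into equal query/key/value blocks along dim 0 (hidden_size = 4
# here purely for illustration).
import torch
_h = 4
_in_proj = torch.randn(3 * _h , _h)
_q , _k , _v = _in_proj[:_h, :] , _in_proj[_h : 2 * _h, :] , _in_proj[-_h:, :]
assert _q.shape == _k.shape == _v.shape == (_h , _h)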
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
_SCREAMING_SNAKE_CASE : str = 24
_SCREAMING_SNAKE_CASE : Any = 16
elif checkpoint == "medium":
_SCREAMING_SNAKE_CASE : Dict = 1_536
_SCREAMING_SNAKE_CASE : Union[str, Any] = 48
_SCREAMING_SNAKE_CASE : Optional[Any] = 24
elif checkpoint == "large":
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
_SCREAMING_SNAKE_CASE : Optional[int] = 48
_SCREAMING_SNAKE_CASE : str = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig(
hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , num_attention_heads=__SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str:
_SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict(
__SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
_SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
_SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
_SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE )
# check we can do a forward pass
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
_SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
_SCREAMING_SNAKE_CASE : Optional[Any] = 2_048
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
# set other default generation config params
_SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate )
_SCREAMING_SNAKE_CASE : Tuple = True
_SCREAMING_SNAKE_CASE : int = 3.0
if pytorch_dump_folder is not None:
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__SCREAMING_SNAKE_CASE )
processor.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 635 | 0 |
lowerCAmelCase_ = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
lowerCAmelCase_ = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = from_type.lower().strip("""s""" )
_SCREAMING_SNAKE_CASE : int = to_type.lower().strip("""s""" )
_SCREAMING_SNAKE_CASE : Any = UNIT_SYMBOL.get(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Dict = UNIT_SYMBOL.get(lowerCAmelCase_ , lowerCAmelCase_ )
if from_sanitized not in METRIC_CONVERSION:
_SCREAMING_SNAKE_CASE : int = (
F"""Invalid \'from_type\' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowerCAmelCase_ )}"""
)
raise ValueError(lowerCAmelCase_ )
if to_sanitized not in METRIC_CONVERSION:
_SCREAMING_SNAKE_CASE : Any = (
F"""Invalid \'to_type\' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowerCAmelCase_ )}"""
)
raise ValueError(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = METRIC_CONVERSION[from_sanitized]
_SCREAMING_SNAKE_CASE : Dict = METRIC_CONVERSION[to_sanitized]
_SCREAMING_SNAKE_CASE : str = 1
if from_exponent > to_exponent:
_SCREAMING_SNAKE_CASE : List[str] = from_exponent - to_exponent
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = -(to_exponent - from_exponent)
return value * pow(10 , lowerCAmelCase_ )
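# Worked example of the exponent arithmetic above: kilometre (exponent 3) to metre
# (exponent 0) multiplies by 10 ** (3 - 0), so 4 km converts to 4000.0 m; the reverse
# direction uses the negative exponent and divides instead.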
if __name__ == "__main__":
from doctest import testmod
testmod()
| 703 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "sew"
def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ):
"""simple docstring"""
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
_SCREAMING_SNAKE_CASE : Dict = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : str = conv_bias
_SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim)
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : str = squeeze_factor
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout
_SCREAMING_SNAKE_CASE : Tuple = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
_SCREAMING_SNAKE_CASE : str = final_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob
_SCREAMING_SNAKE_CASE : List[str] = mask_time_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob
_SCREAMING_SNAKE_CASE : int = mask_feature_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks
# ctc loss
_SCREAMING_SNAKE_CASE : int = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity
# sequence classification
_SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
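# For the default stride tuple above, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the
# property evaluates to 5 * 2**6 == 320, i.e. the feature extractor emits one frame per
# 320 input samples. A standalone check:
import functools, operator
assert functools.reduce(operator.mul , (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , 1) == 3_2_0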
| 635 | 0 |
"""simple docstring"""
from PIL import Image
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = (259 * (level + 255)) / (255 * (259 - level))
def contrast(__SCREAMING_SNAKE_CASE ) -> int:
return int(128 + factor * (c - 128) )
return img.point(SCREAMING_SNAKE_CASE__ )
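# Worked example of the factor formula above: at level = 170 (the value used below),
# factor = (259 * (170 + 255)) / (255 * (259 - 170)) ~= 4.85, so an input pixel of 150
# maps to int(128 + 4.85 * (150 - 128)) = 234.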
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
lowerCAmelCase_ = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 704 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
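# Usage note (not part of the original file): with `_import_structure` restored
# above, `_LazyModule` defers the heavy torch-backed imports until an attribute
# such as `UniSpeechModel` is first accessed on this module.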
| 635 | 0 |
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("""p should not be less than 2!""")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
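    # Quick sketch (not in the original file): the loop below should print True
    # exactly for the Mersenne prime exponents among these, i.e. 3, 5, 7 and 13;
    # 2**11 - 1 = 2047 = 23 * 89 is composite, so 11 prints False.
    for p in (3, 5, 7, 11, 13):
        print(p, lucas_lehmer_test(p))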
| 705 | """simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : int = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : List[Any] = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str:
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = parquet_path
elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
_SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
for split in splits:
_SCREAMING_SNAKE_CASE : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
_SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = """train"""
        path = {"""train""": parquet_path, """test""": parquet_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / """foo.parquet""")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / """foo.parquet""")
    output_table = pf.read()
    assert dataset.data.table == output_table
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]}
_SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} )
_SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
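# Minimal roundtrip sketch (not part of the test suite above; assumes a
# `Dataset` instance `ds` and a writable `path`):
#   ParquetDatasetWriter(ds, path).write()
#   reloaded = ParquetDatasetReader(path).read()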
| 635 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class _snake_case ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class _snake_case ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class _snake_case ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class _snake_case ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class _snake_case ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])
| 706 | """simple docstring"""
def lowerCamelCase_(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit of ``num``."""
    if not isinstance(num, int):
        raise TypeError("""only integers accepted as input""")
    else:
        num_string = str(abs(num))
        num_transpositions = [list(num_string) for _ in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int("""""".join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
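# Worked example (not in the original file): for input 261 the transpositions
# after deleting one digit are 61, 21 and 26, so the function returns 61.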
| 635 | 0 |
"""simple docstring"""
def selection_sort(collection):
    """Sort ``collection`` in place with selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap the smallest remaining element into position i
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
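# Example (not in the original file): selection_sort([3, 1, 5, 2]) returns
# [1, 2, 3, 5]; the input list is mutated in place and also returned.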
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
| 707 | """simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : str = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : List[Any] = -1
_SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0])
_SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A)
_SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A)
thread.start()
_SCREAMING_SNAKE_CASE : Any = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Any = -1
_SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A)
_SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :]
_SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A)
model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""")
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A)
_SCREAMING_SNAKE_CASE : int = -1
_SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A)
model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n"
_SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
_SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A)
_SCREAMING_SNAKE_CASE : Tuple = -1
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
_SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001)
_SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
_SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_A):
_SCREAMING_SNAKE_CASE : str = """"""
for new_text in streamer:
streamer_text += new_text
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tapas'''] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_tapas'''] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _snake_case ( __snake_case ):
"""simple docstring"""
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail"""):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")
    def encode(self, text, labels):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"""This example is {label}""" for label in labels], return_tensors="""pt""", padding="""max_length""", )
    def decode(self, outputs):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
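# Usage sketch (assumption, not from the original file; follows the
# PipelineTool calling convention of the class above):
#   tool = _snake_case()
#   tool("""This movie was great""", labels=["""positive""", """negative"""])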
| 635 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=True)
        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""").images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=True)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="""numpy""").images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text_to_image(self):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            """shi-labs/versatile-diffusion""", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=True)
        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 709 | """simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_for_image_classification(self):
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)
        from datasets import load_dataset

        dataset = load_dataset("""nielsr/rvlcdip-demo""")
        image = dataset["""train"""][0]["""image"""].convert("""RGB""")
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_logits = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_logits, atol=1e-4))
| 635 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _snake_case :
"""simple docstring"""
@staticmethod
def _lowerCAmelCase ( *_A : Tuple , **_A : Any):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
a = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowerCAmelCase ( self : List[Any] , _A : str , _A : Dict , _A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
_SCREAMING_SNAKE_CASE : Optional[int] = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def _lowerCAmelCase ( self : str , _A : str , _A : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = vqa_pipeline(_a , top_k=1)
self.assertEqual(
_a , [
[{"""score""": ANY(_a), """answer""": ANY(_a)}],
[{"""score""": ANY(_a), """answer""": ANY(_a)}],
] , )
@require_torch
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
_SCREAMING_SNAKE_CASE : Dict = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_SCREAMING_SNAKE_CASE : Tuple = """How many cats are there?"""
_SCREAMING_SNAKE_CASE : List[str] = vqa_pipeline(image=_a , question="""How many cats are there?""" , top_k=2)
self.assertEqual(
_a , [{"""score""": ANY(_a), """answer""": ANY(_a)}, {"""score""": ANY(_a), """answer""": ANY(_a)}])
_SCREAMING_SNAKE_CASE : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
_a , [{"""score""": ANY(_a), """answer""": ANY(_a)}, {"""score""": ANY(_a), """answer""": ANY(_a)}])
@slow
@require_torch
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""")
_SCREAMING_SNAKE_CASE : Optional[int] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_SCREAMING_SNAKE_CASE : List[str] = """How many cats are there?"""
_SCREAMING_SNAKE_CASE : List[str] = vqa_pipeline(image=_a , question=_a , top_k=2)
self.assertEqual(
nested_simplify(_a , decimals=4) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}])
_SCREAMING_SNAKE_CASE : Optional[Any] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(_a , decimals=4) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}])
_SCREAMING_SNAKE_CASE : Union[str, Any] = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(_a , decimals=4) , [[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""")
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
pass
| 710 | """simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    """simple docstring"""
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1_0_2_4, imageDimSize=7_6_8, **kwargs):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """simple docstring"""
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        """simple docstring"""
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
| 635 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(args.tf_model_dir , """parameters.json""" )
_SCREAMING_SNAKE_CASE : List[str] = json.loads(open(__UpperCamelCase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
_SCREAMING_SNAKE_CASE : Dict = args.output + '''.pt'''
_SCREAMING_SNAKE_CASE : int = OrderedDict()
with tf.device("""/CPU:0""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.train.load_checkpoint(args.tf_model_dir )
_SCREAMING_SNAKE_CASE : List[str] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_SCREAMING_SNAKE_CASE : int = reader.get_tensor(__UpperCamelCase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
_SCREAMING_SNAKE_CASE : Tuple = 8
_SCREAMING_SNAKE_CASE : Dict = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_SCREAMING_SNAKE_CASE : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/moe""" ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
_SCREAMING_SNAKE_CASE : Optional[int] = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
_SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/softmlp/kernel""" ):
_SCREAMING_SNAKE_CASE : List[Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
_SCREAMING_SNAKE_CASE : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
_SCREAMING_SNAKE_CASE : List[str] = key_name[-9:-7]
for i in range(16 ):
_SCREAMING_SNAKE_CASE : Dict = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
_SCREAMING_SNAKE_CASE : List[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/mlp""" ):
_SCREAMING_SNAKE_CASE : Tuple = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
_SCREAMING_SNAKE_CASE : List[str] = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
_SCREAMING_SNAKE_CASE : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p1/bias""" ):
_SCREAMING_SNAKE_CASE : List[Any] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
_SCREAMING_SNAKE_CASE : Any = vnp.copy() # same because it is one dimensional
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p2/kernel""" ):
_SCREAMING_SNAKE_CASE : str = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
_SCREAMING_SNAKE_CASE : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p2/bias""" ):
_SCREAMING_SNAKE_CASE : List[Any] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
_SCREAMING_SNAKE_CASE : List[str] = vnp.copy() # same because it is one dimensional
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/ln""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = '''model.blocks.%d.feed_forward.norm.bias''' % player
_SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same because it is one dimensional
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/g""" ):
_SCREAMING_SNAKE_CASE : List[str] = '''model.blocks.%d.feed_forward.norm.weight''' % player
_SCREAMING_SNAKE_CASE : List[Any] = vnp.copy() # same because it is one dimensional
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/att""" ):
_SCREAMING_SNAKE_CASE : Any = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_SCREAMING_SNAKE_CASE : List[Any] = state[:, 0, :, :]
_SCREAMING_SNAKE_CASE : str = state[:, 1, :, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = state[:, 2, :, :]
_SCREAMING_SNAKE_CASE : List[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : Optional[int] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : Optional[Any] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Dict = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/o/kernel""" ):
_SCREAMING_SNAKE_CASE : Optional[int] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/an""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_SCREAMING_SNAKE_CASE : int = '''model.blocks.%d.self_attn.norm.bias''' % player
_SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same because it is one dimensional
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/g""" ):
_SCREAMING_SNAKE_CASE : int = '''model.blocks.%d.self_attn.norm.weight''' % player
_SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
_SCREAMING_SNAKE_CASE : str = torch.tensor(__UpperCamelCase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
_SCREAMING_SNAKE_CASE : List[str] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
_SCREAMING_SNAKE_CASE : Optional[Any] = '''model.%s.weight''' % nlayer
_SCREAMING_SNAKE_CASE : List[str] = vnp.copy() # same in embedded
_SCREAMING_SNAKE_CASE : int = torch.tensor(__UpperCamelCase )
if key_name.startswith("""model/wte""" ):
_SCREAMING_SNAKE_CASE : Optional[int] = '''lm_head.weight'''
_SCREAMING_SNAKE_CASE : Any = vnp.copy() # same in embedded
_SCREAMING_SNAKE_CASE : str = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/wob""" ):
_SCREAMING_SNAKE_CASE : Any = '''final_logits_bias'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same in embedded
_SCREAMING_SNAKE_CASE : Dict = state.reshape((1, -1) )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(__UpperCamelCase )
elif key_name == "model/dense/kernel":
_SCREAMING_SNAKE_CASE : Any = '''model.last_project.weight'''
_SCREAMING_SNAKE_CASE : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_SCREAMING_SNAKE_CASE : int = torch.tensor(__UpperCamelCase )
elif key_name == "model/dense_1/bias":
_SCREAMING_SNAKE_CASE : List[Any] = '''model.last_project.bias'''
_SCREAMING_SNAKE_CASE : Any = vnp.copy() # same because it is one dimensional
_SCREAMING_SNAKE_CASE : int = torch.tensor(__UpperCamelCase )
torch.save(__UpperCamelCase , args.output )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowerCAmelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 711 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to ``precision`` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("""Undefined for non-integers""")
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("""RGB""", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
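    # Blinker demo (not in the original script): the blinker oscillates with
    # period 2, so new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    # and applying new_generation twice returns the original BLINKER.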
| 712 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_46_94
        config.cell_selection_preference = 0.20_79_51
        config.huber_loss_delta = 0.12_11_94
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_35_25_13
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.45_19
        config.cell_selection_preference = 0.90_34_21
        config.huber_loss_delta = 2_22.0_88
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_31_41
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F"""Task {task} not supported.""")
    print(F"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 635 | 0 |
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("""*""")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
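# Worked example (not in the original file): encrypt("HELLO", 3) == "HOELL",
# decrypt("HOELL", 3) == "HELLO", and bruteforce("HOELL")[3] == "HELLO".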
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 | """simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
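# The Rayleigh quotient is R(A, v) = (v* A v) / (v* v), where v* is the
# conjugate transpose; for a Hermitian A it is real-valued and bounded by the
# smallest and largest eigenvalues of A.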
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 635 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)

if __name__ == "__main__":
    main()
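# A minimal alternative sketch (assumes accelerate >= 0.12, which ships
# `Accelerator.gather_for_metrics`): the manual `samples_seen` bookkeeping above
# collapses to a single call that truncates the duplicated samples automatically.
#
#     for step, batch in enumerate(eval_dataloader):
#         with torch.no_grad():
#             outputs = model(**batch)
#         predictions = outputs.logits.argmax(dim=-1)
#         predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#         metric.add_batch(predictions=predictions, references=references)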
| 714 | """simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
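# Usage sketch (illustrative values): the function solves the mass-action law
# n_i**2 = electron_conc * hole_conc for whichever argument is passed as 0.
#
#     carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=5)
#     # -> ('hole_conc', 1.0)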
| 635 | 0 |
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = '''\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME

@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE

@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 715 | """simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
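# Launch sketch (file name is hypothetical -- use wherever this script is saved):
#
#     accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16
#
# The per-fold test logits are averaged before the argmax, i.e. a soft-voting
# ensemble across the k folds.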
| 635 | 0 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    print(solution())
| 716 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 635 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)

@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")

def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 717 | """simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
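# OpenCV-free usage sketch (hypothetical array input; any HxWx3 uint8 image works):
#
#     import numpy as np
#     im = (np.random.rand(4, 4, 3) * 255).astype(np.uint8)
#     n = NearestNeighbour(im, 8, 8)  # upscale 4x4 -> 8x8
#     n.process()
#     print(n.output.shape)  # (8, 8, 3)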
| 635 | 0 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
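# Worked example: the infix expression (5 + 6) * 9 written in postfix:
#
#     solve(["5", "6", "+", "9", "*"])  # prints the trace table, returns 99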
| 718 | """simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
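# Usage sketch (file and input names are hypothetical). Each line of the expected-results
# file is "path/to/test_file.py;TestClass;test_name;corrected assertion line":
#
#     python overwrite_expected_slice.py --correct_filename fixed_slices.txt \
#         --fail_filename failures.txt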
| 635 | 0 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )

def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)

def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
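# Usage sketch (package names and pins are illustrative, not requirements of this module):
#
#     require_version("tokenizers==0.9.4", hint="Try: pip install tokenizers")
#     require_version("numpy>=1.17,<2.0")
#     require_version_core("datasets>=1.0")  # appends the core install hint on failure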
| 719 | """simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg

def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict

def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
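# Usage sketch (paths are placeholders, not shipped with this script):
#
#     python convert_clap_original_pytorch_to_hf.py \
#         --checkpoint_path ./HTSAT-tiny-roberta.pt \
#         --pytorch_dump_folder_path ./clap-hf \
#         --enable_fusion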
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    import doctest
    doctest.testmod()
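# Usage sketch (keywords and text are illustrative):
#
#     auto = Automaton(["what", "hat", "ver", "er"])
#     print(auto.search_in("whatever, err ... , wherever"))
#     # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}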
| 720 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 635 | 0 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 721 | """simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retried while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if args.model_type is None:
_SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
_SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name
if args.index_path is not None:
_SCREAMING_SNAKE_CASE : Any = args.index_path
else:
_SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration
_SCREAMING_SNAKE_CASE : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
_SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
_SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
_SCREAMING_SNAKE_CASE : str = []
for line in tqdm(__SCREAMING_SNAKE_CASE ):
questions.append(line.strip() )
if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size:
_SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" )
preds_file.flush()
_SCREAMING_SNAKE_CASE : Any = []
if len(__SCREAMING_SNAKE_CASE ) > 0:
_SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
preds_file.flush()
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
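def _demo_batched_prediction_writing():
    # Hedged, standalone sketch (not part of the original script) of the
    # batching idiom used in the loop above: fill a batch, answer it, write and
    # flush, then reset; the trailing partially filled batch is flushed at the
    # end. `answer_batch` is a hypothetical stand-in for the evaluate_batch_*
    # functions.
    def answer_batch(batch):
        return ["answer to: " + q for q in batch]

    lines, batch_size = ["q1", "q2", "q3"], 2
    questions, preds = [], []
    for line in lines:
        questions.append(line.strip())
        if len(questions) == batch_size:
            preds.extend(answer_batch(questions))
            questions = []
    if len(questions) > 0:  # final batch may be smaller than batch_size
        preds.extend(answer_batch(questions))
    assert preds == ["answer to: q1", "answer to: q2", "answer to: q3"]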
if __name__ == "__main__":
lowerCAmelCase_ = get_args()
main(args)
| 635 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE : Optional[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_SCREAMING_SNAKE_CASE : List[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__))))
_SCREAMING_SNAKE_CASE : List[str] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_SCREAMING_SNAKE_CASE : Dict = {"""unk_token""": """<unk>"""}
_SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
_SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(UpperCamelCase__) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(UpperCamelCase__))
_SCREAMING_SNAKE_CASE : int = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , UpperCamelCase__)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__)
def _lowerCAmelCase ( self : Optional[Any] , **_A : Union[str, Any]):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def _lowerCAmelCase ( self : Optional[Any] , **_A : Optional[Any]):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def _lowerCAmelCase ( self : Union[str, Any] , **_A : Dict):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
_SCREAMING_SNAKE_CASE : Optional[int] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1)) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__)
processor_slow.save_pretrained(self.tmpdirname)
_SCREAMING_SNAKE_CASE : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__)
_SCREAMING_SNAKE_CASE : Dict = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__)
processor_fast.save_pretrained(self.tmpdirname)
_SCREAMING_SNAKE_CASE : Dict = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__)
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__)
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_SCREAMING_SNAKE_CASE : Dict = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0)
_SCREAMING_SNAKE_CASE : str = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCamelCase__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCamelCase__)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : str = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__)
_SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Any = image_processor(UpperCamelCase__ , return_tensors="""np""")
_SCREAMING_SNAKE_CASE : int = processor(images=UpperCamelCase__ , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Dict = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__)
_SCREAMING_SNAKE_CASE : List[Any] = """lower newer"""
_SCREAMING_SNAKE_CASE : List[str] = processor(text=UpperCamelCase__)
_SCREAMING_SNAKE_CASE : Any = tokenizer(UpperCamelCase__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__)
_SCREAMING_SNAKE_CASE : int = """lower newer"""
_SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__)
self.assertListEqual(list(inputs.keys()) , ["""input_ids""", """attention_mask""", """pixel_values"""])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__):
processor()
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
_SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE : Tuple = processor.batch_decode(UpperCamelCase__)
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
_SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__)
_SCREAMING_SNAKE_CASE : List[str] = """lower newer"""
_SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names) | 700 | """simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]:
set_seed(3 )
# generate train_data and objective_set
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
_SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE )
print("""computing perplexity on objective set""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item()
print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]:
set_seed(42 )
# Load pre-trained model
_SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE )
# Train secondary learner
_SCREAMING_SNAKE_CASE : Any = train_secondary_learner(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(__SCREAMING_SNAKE_CASE )
secondary_learner.eval()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : int = []
# Compute the performance of the transformer model at the beginning
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
for epoch in range(int(__SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(__SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 )
_SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = True
if secondary_learner is not None:
_SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward(
torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
_SCREAMING_SNAKE_CASE : List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
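def _demo_decaying_threshold():
    # Hedged sketch of one plausible realization of the schedule described in
    # the comments above (decay the filter threshold from one standard
    # deviation above the mean of the observed IG(X) values to one below it
    # over the first 10 batches). The original code simply drops the threshold
    # to -1 at step 10; this interpolation is an assumption, not the IGF code.
    from statistics import mean, stdev

    observed_qs = [0.2, 0.5, 0.1, 0.4, 0.3, 0.6, 0.2, 0.5, 0.3, 0.4]
    mu, sigma = mean(observed_qs), stdev(observed_qs)
    for step in range(10):
        frac = step / 9  # 0.0 on the first batch, 1.0 on the tenth
        threshold = mu + (1 - 2 * frac) * sigma  # mu + sigma -> mu - sigma
        assert mu - sigma <= threshold <= mu + sigma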
def lowerCamelCase_()-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_SCREAMING_SNAKE_CASE : int = training_secondary_learner(
__SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _snake_case :
"""simple docstring"""
a = 42
a = None
a = None
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]:
# Validation
def is_valid_tree(__SCREAMING_SNAKE_CASE ) -> bool:
if node is None:
return True
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , __SCREAMING_SNAKE_CASE , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , __SCREAMING_SNAKE_CASE )
)
return is_binary_search_tree_recursive_check(__SCREAMING_SNAKE_CASE , -float("""inf""" ) , float("""inf""" ) )
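def _demo_bst_bounds_check():
    # Hedged, self-contained sketch of the open-interval recursion above: every
    # node must lie strictly inside (left_bound, right_bound), and the bounds
    # tighten to (left_bound, node.data) on the left and (node.data,
    # right_bound) on the right. Trees are plain (data, left, right) tuples
    # here to stay independent of the dataclass.
    def is_bst(node, lo=float("-inf"), hi=float("inf")):
        if node is None:
            return True
        data, left, right = node
        return lo < data < hi and is_bst(left, lo, data) and is_bst(right, data, hi)

    valid = (2.0, (1.0, None, None), (3.0, None, None))
    invalid = (2.0, (3.0, None, None), None)  # left child exceeds its parent
    assert is_bst(valid) and not is_bst(invalid)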
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( __snake_case ):
"""simple docstring"""
a = ["image_processor", "tokenizer"]
a = "ChineseCLIPImageProcessor"
a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
_SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""")
_SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_A , _A)
_SCREAMING_SNAKE_CASE : Dict = self.image_processor
def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A)
if images is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A)
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A) , tensor_type=_A)
def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A)
def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any):
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A)
@property
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , )
return self.image_processor_class
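def _demo_processor_call_semantics():
    # Hedged sketch of the dispatch in __call__ above, with the tokenizer and
    # image processor replaced by stub dicts: text-only returns the text
    # encoding, image-only returns the image features, and passing both merges
    # pixel_values into the text encoding.
    def call(text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {"input_ids": [[1, 2]]} if text is not None else None
        image_features = {"pixel_values": [[0.0]]} if images is not None else None
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features["pixel_values"]
            return encoding
        return encoding if text is not None else image_features

    assert "pixel_values" in call(text="hi", images=[object()])
    assert "pixel_values" not in call(text="hi")
    assert "input_ids" not in call(images=[object()])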
| 635 | 0 |
"""simple docstring"""
from statistics import mean, stdev
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 3 )-> list:
_SCREAMING_SNAKE_CASE : str = min(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Optional[int] = max(SCREAMING_SNAKE_CASE_ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , SCREAMING_SNAKE_CASE_ ) for x in data]
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 3 )-> list:
_SCREAMING_SNAKE_CASE : int = mean(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : int = stdev(SCREAMING_SNAKE_CASE_ )
# standardize data
return [round((x - mu) / (sigma) , SCREAMING_SNAKE_CASE_ ) for x in data]
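def _demo_rescaling():
    # Hedged usage sketch: min-max normalization (first helper above) maps the
    # data onto [0, 1], while z-score standardization (second helper) centers
    # it at mean 0 with unit sample standard deviation.
    data = [2.0, 4.0, 6.0]
    # min-max: (x - min) / (max - min) = (x - 2) / 4
    assert [(x - 2.0) / 4.0 for x in data] == [0.0, 0.5, 1.0]
    # z-score: mean is 4, sample stdev is 2, so (x - mean) / stdev = (x - 4) / 2
    assert [(x - 4.0) / 2.0 for x in data] == [-1.0, 0.0, 1.0]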
| 702 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ['''model.decoder.embed_positions.weights''']
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]:
if "emb" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]:
_SCREAMING_SNAKE_CASE : str = list(state_dict.keys() )
_SCREAMING_SNAKE_CASE : Tuple = {}
for key in keys:
_SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
_SCREAMING_SNAKE_CASE : str = val[:hidden_size, :]
_SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_SCREAMING_SNAKE_CASE : int = val
else:
_SCREAMING_SNAKE_CASE : Dict = val
return state_dict, enc_dec_proj_state_dict
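def _demo_split_fused_qkv():
    # Hedged sketch of the in_proj_weight split above: a fused projection of
    # shape (3 * hidden_size, hidden_size) is sliced row-wise into the query,
    # key and value weights, in that order, so concatenating the three slices
    # recovers the fused matrix.
    hidden_size = 4
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
    fused = fused.reshape(3 * hidden_size, hidden_size)
    q_proj = fused[:hidden_size, :]
    k_proj = fused[hidden_size : 2 * hidden_size, :]
    v_proj = fused[-hidden_size:, :]
    assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)
    assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), fused)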
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
_SCREAMING_SNAKE_CASE : str = 24
_SCREAMING_SNAKE_CASE : Any = 16
elif checkpoint == "medium":
_SCREAMING_SNAKE_CASE : Dict = 1_536
_SCREAMING_SNAKE_CASE : Union[str, Any] = 48
_SCREAMING_SNAKE_CASE : Optional[Any] = 24
elif checkpoint == "large":
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
_SCREAMING_SNAKE_CASE : Optional[int] = 48
_SCREAMING_SNAKE_CASE : str = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig(
hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , num_attention_heads=__SCREAMING_SNAKE_CASE , )
return config
@torch.no_grad()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str:
_SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict(
__SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size )
_SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
_SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
_SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE )
# check we can do a forward pass
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
_SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" )
_SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
_SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
# set the appropriate bos/pad token ids
_SCREAMING_SNAKE_CASE : Optional[Any] = 2_048
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
# set other default generation config params
_SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate )
_SCREAMING_SNAKE_CASE : Tuple = True
_SCREAMING_SNAKE_CASE : int = 3.0
if pytorch_dump_folder is not None:
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__SCREAMING_SNAKE_CASE )
processor.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 635 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class _snake_case :
"""simple docstring"""
a = 42 # [batch_size x 3]
a = 42 # [batch_size x 3]
a = 42 # [batch_size x 3]
a = 42 # [batch_size x 3]
a = 42
a = 42
a = 42
a = 42
a = 42
def _lowerCAmelCase ( self : str):
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa))
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa))
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(self.height * self.width)
_SCREAMING_SNAKE_CASE : List[Any] = torch.stack(
[
pixel_indices % self.width,
torch.div(_A , self.width , rounding_mode="""trunc"""),
] , axis=1 , )
return coords
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.shape
_SCREAMING_SNAKE_CASE : Dict = int(np.prod(_A))
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_coords()
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.broadcast_to(coords.unsqueeze(0) , [batch_size * inner_batch_size, *coords.shape])
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_camera_rays(_A)
_SCREAMING_SNAKE_CASE : Any = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3)
return rays
def _lowerCAmelCase ( self : Optional[int] , _A : torch.Tensor):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = coords.view(_A , -1 , 2)
_SCREAMING_SNAKE_CASE : Optional[int] = self.resolution()
_SCREAMING_SNAKE_CASE : List[str] = self.fov()
_SCREAMING_SNAKE_CASE : int = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE : str = fracs * torch.tan(fov / 2)
_SCREAMING_SNAKE_CASE : Tuple = fracs.view(_A , -1 , 2)
_SCREAMING_SNAKE_CASE : Optional[int] = (
self.z.view(_A , 1 , 3)
+ self.x.view(_A , 1 , 3) * fracs[:, :, :1]
+ self.y.view(_A , 1 , 3) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE : List[Any] = directions / directions.norm(dim=-1 , keepdim=_A)
_SCREAMING_SNAKE_CASE : Any = torch.stack(
[
torch.broadcast_to(self.origin.view(_A , 1 , 3) , [batch_size, directions.shape[1], 3]),
directions,
] , dim=2 , )
return rays.view(_A , *_A , 2 , 3)
def _lowerCAmelCase ( self : Dict , _A : int , _A : int):
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> DifferentiableProjectiveCamera:
_SCREAMING_SNAKE_CASE : str = []
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_SCREAMING_SNAKE_CASE : Optional[Any] = np.array([np.sin(SCREAMING_SNAKE_CASE_ ), np.cos(SCREAMING_SNAKE_CASE_ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_SCREAMING_SNAKE_CASE : List[Any] = -z * 4
_SCREAMING_SNAKE_CASE : Tuple = np.array([np.cos(SCREAMING_SNAKE_CASE_ ), -np.sin(SCREAMING_SNAKE_CASE_ ), 0.0] )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cross(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
origins.append(SCREAMING_SNAKE_CASE_ )
xs.append(SCREAMING_SNAKE_CASE_ )
ys.append(SCREAMING_SNAKE_CASE_ )
zs.append(SCREAMING_SNAKE_CASE_ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE_ , axis=0 ) ).float() , width=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(SCREAMING_SNAKE_CASE_ )) , )
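def _demo_fractional_image_coords():
    # Hedged sketch of the coordinate mapping inside get_camera_rays above:
    # integer pixel coordinates in [0, res - 1] are rescaled to fractions in
    # [-1, 1] via (x / (res - 1)) * 2 - 1, and those fractions are then scaled
    # by tan(fov / 2) to fan the rays out across the field of view.
    res = 5
    fracs = [(x / (res - 1)) * 2 - 1 for x in range(res)]
    assert fracs == [-1.0, -0.5, 0.0, 0.5, 1.0]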
| 703 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "sew"
def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ):
"""simple docstring"""
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
_SCREAMING_SNAKE_CASE : Dict = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : str = conv_bias
_SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim)
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : str = squeeze_factor
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout
_SCREAMING_SNAKE_CASE : Tuple = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
_SCREAMING_SNAKE_CASE : str = final_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob
_SCREAMING_SNAKE_CASE : List[str] = mask_time_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob
_SCREAMING_SNAKE_CASE : int = mask_feature_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks
# ctc loss
_SCREAMING_SNAKE_CASE : int = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity
# sequence classification
_SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
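def _demo_inputs_to_logits_ratio():
    # Hedged worked example for the property above: the product of the conv
    # strides is the overall downsampling factor of the feature extractor. For
    # the default SEW strides this is 5 * 2**6 = 320 input samples per output
    # frame.
    default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    assert functools.reduce(operator.mul, default_conv_stride, 1) == 320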
| 635 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 1_000_000 , __SCREAMING_SNAKE_CASE = 10 )-> int:
_SCREAMING_SNAKE_CASE : defaultdict = defaultdict(__lowercase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_SCREAMING_SNAKE_CASE : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
_SCREAMING_SNAKE_CASE : List[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(__lowercase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
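def _demo_lamina_tile_count():
    # Hedged sanity check for the counting above: a square lamina with outer
    # width w and hole width h uses w * w - h * h tiles; w and h must share the
    # same parity (hence the `% 2` adjustment) and h can be at most w - 2.
    outer_width, hole_width = 5, 3
    assert outer_width * outer_width - hole_width * hole_width == 16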
if __name__ == "__main__":
print(F"{solution() = }")
| 704 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 635 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , _A : Dict , _A : Tuple=7 , _A : Tuple=3 , _A : int=1_8 , _A : List[str]=3_0 , _A : Optional[int]=4_0_0 , _A : str=True , _A : Dict=None , _A : List[str]=True , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {"""height""": 1_8, """width""": 1_8}
_SCREAMING_SNAKE_CASE : List[Any] = parent
_SCREAMING_SNAKE_CASE : str = batch_size
_SCREAMING_SNAKE_CASE : Optional[int] = num_channels
_SCREAMING_SNAKE_CASE : str = image_size
_SCREAMING_SNAKE_CASE : List[str] = min_resolution
_SCREAMING_SNAKE_CASE : int = max_resolution
_SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
_SCREAMING_SNAKE_CASE : str = size
_SCREAMING_SNAKE_CASE : int = do_normalize
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
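def _demo_nearest_cluster():
    # Hedged sketch of what the "clusters" above are for: ImageGPT quantizes
    # each (normalized) RGB pixel to the index of its nearest color cluster,
    # here shown as an argmin over squared distances with a toy two-color
    # palette.
    clusters = np.asarray([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    pixel = np.asarray([0.9, 0.1, 0.0])
    distances = ((clusters - pixel) ** 2).sum(axis=-1)
    assert int(distances.argmin()) == 0  # closest to the first cluster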
@require_torch
@require_vision
class _snake_case ( __lowercase , unittest.TestCase ):
"""simple docstring"""
a = ImageGPTImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = ImageGPTImageProcessingTester(self)
@property
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_A , """clusters"""))
self.assertTrue(hasattr(_A , """do_resize"""))
self.assertTrue(hasattr(_A , """size"""))
self.assertTrue(hasattr(_A , """do_normalize"""))
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8})
_SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2})
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict)
_SCREAMING_SNAKE_CASE : List[Any] = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , obj[key]))
else:
self.assertEqual(obj[key] , _A)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , """image_processor.json""")
image_processor_first.to_json_file(_A)
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_json_file(_A).to_dict()
_SCREAMING_SNAKE_CASE : List[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , _A)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_pretrained(_A).to_dict()
_SCREAMING_SNAKE_CASE : Tuple = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , _A)
@unittest.skip("""ImageGPT requires clusters at initialization""")
def _lowerCAmelCase ( self : int):
"""simple docstring"""
pass
def lowerCamelCase_()-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Dict = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
_SCREAMING_SNAKE_CASE : List[Any] = Image.open(dataset[4]["""file"""] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(dataset[5]["""file"""] )
_SCREAMING_SNAKE_CASE : Tuple = [imagea, imagea]
return images
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""")
_SCREAMING_SNAKE_CASE : int = prepare_images()
# test non-batched
_SCREAMING_SNAKE_CASE : Any = image_processing(images[0] , return_tensors="""pt""")
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
_SCREAMING_SNAKE_CASE : Optional[int] = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _A)
# test batched
_SCREAMING_SNAKE_CASE : str = image_processing(_A , return_tensors="""pt""")
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
_SCREAMING_SNAKE_CASE : Optional[Any] = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _A)
| 705 | """simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : int = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features
_SCREAMING_SNAKE_CASE : List[Any] = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
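def _demo_features_cast_spec():
    # Hedged sketch of the Features construction used in the test above: a
    # plain {column: dtype} mapping becomes a datasets.Features of Value types,
    # which the reader then uses to cast columns on load.
    spec = {"col_1": "string", "col_2": "int32"}
    features = Features({name: Value(dtype) for name, dtype in spec.items()})
    assert features["col_2"].dtype == "int32"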
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str:
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = parquet_path
elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
_SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for split in splits:
_SCREAMING_SNAKE_CASE : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_(features , parquet_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"""train""": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_(split , parquet_path , tmp_path ):
    if split:
        path = {split: parquet_path}
    else:
        split = """train"""
        path = {"""train""": parquet_path, """test""": parquet_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_(dataset , tmp_path ):
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / """foo.parquet""" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def lowerCamelCase_(shared_datadir , tmp_path ):
    image_path = str(shared_datadir / """test_image_rgb.jpg""" )
    data = {"""image""": [image_path]}
    features = Features({"""image""": Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase_(feature , expected ):
    assert get_writer_batch_size(feature ) == expected
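# Note: get_writer_batch_size is used as the maximum Parquet row-group size.
# Binary-heavy features (Image, Audio) get a smaller row group so streaming
# readers can fetch examples without buffering huge chunks, while plain scalar
# features return None and fall back to the library default.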
| 635 | 0 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = DownBlockaD # noqa F405
a = '''down'''
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
expected_slice = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = ResnetDownsampleBlockaD # noqa F405
a = '''down'''
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
expected_slice = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = AttnDownBlockaD # noqa F405
a = '''down'''
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
expected_slice = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = CrossAttnDownBlockaD # noqa F405
a = '''down'''
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 3_2
return init_dict, inputs_dict
def _lowerCAmelCase ( self : int):
"""simple docstring"""
expected_slice = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = SimpleCrossAttnDownBlockaD # noqa F405
a = '''down'''
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=True)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""")
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
expected_slice = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = SkipDownBlockaD # noqa F405
a = '''down'''
@property
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=True)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
expected_slice = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = AttnSkipDownBlockaD # noqa F405
a = '''down'''
@property
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=True)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
expected_slice = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = DownEncoderBlockaD # noqa F405
a = '''down'''
@property
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return super().get_dummy_input(include_temb=False)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
init_dict = {
    """in_channels""": 3_2,
    """out_channels""": 3_2,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
expected_slice = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = AttnDownEncoderBlockaD # noqa F405
a = '''down'''
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return super().get_dummy_input(include_temb=False)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
init_dict = {
    """in_channels""": 3_2,
    """out_channels""": 3_2,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
expected_slice = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = UNetMidBlockaD # noqa F405
a = '''mid'''
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
init_dict = {
    """in_channels""": 3_2,
    """temb_channels""": 1_2_8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : int):
"""simple docstring"""
expected_slice = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = UNetMidBlockaDCrossAttn # noqa F405
a = '''mid'''
def _lowerCAmelCase ( self : int):
"""simple docstring"""
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 3_2
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
expected_slice = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = UNetMidBlockaDSimpleCrossAttn # noqa F405
a = '''mid'''
@property
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=True)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 3_2
return init_dict, inputs_dict
def _lowerCAmelCase ( self : str):
"""simple docstring"""
expected_slice = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = UpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
expected_slice = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = ResnetUpsampleBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
expected_slice = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = CrossAttnUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : int):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 3_2
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
expected_slice = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = SimpleCrossAttnUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=True , include_encoder_hidden_states=True)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 3_2
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
expected_slice = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = AttnUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=True)
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""")
def _lowerCAmelCase ( self : str):
"""simple docstring"""
expected_slice = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = SkipUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
expected_slice = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = AttnSkipUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
expected_slice = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = UpDecoderBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return super().get_dummy_input(include_temb=False)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
init_dict = {"""in_channels""": 3_2, """out_channels""": 3_2}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : str):
"""simple docstring"""
expected_slice = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(expected_slice)
class _snake_case ( __a , unittest.TestCase ):
"""simple docstring"""
a = AttnUpDecoderBlockaD # noqa F405
a = '''up'''
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
return super().get_dummy_input(include_temb=False)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
init_dict = {"""in_channels""": 3_2, """out_channels""": 3_2}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
expected_slice = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(expected_slice)
| 706 | """simple docstring"""
def remove_digit(num: int )-> int:
    if not isinstance(num , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_string = str(abs(num ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 635 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = '''left'''
def __init__( self : Tuple , _A : Dict , _A : List[str]=False , _A : Optional[Any]=True , _A : Optional[Any]=False , _A : Optional[Any]="<s>" , _A : Optional[int]="</s>" , _A : Optional[Any]="<unk>" , _A : Optional[Any]="<sep>" , _A : List[str]="<pad>" , _A : Any="<cls>" , _A : Any="<mask>" , _A : Dict=["<eop>", "<eod>"] , _A : Tuple = None , **_A : Optional[int] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else mask_token
_SCREAMING_SNAKE_CASE : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
_SCREAMING_SNAKE_CASE : str = 3
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : str = remove_space
_SCREAMING_SNAKE_CASE : Tuple = keep_accents
_SCREAMING_SNAKE_CASE : Dict = vocab_file
_SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(a_)
@property
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return len(self.sp_model)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(a_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[int] = None
return state
def __setstate__( self : Dict , _A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
_SCREAMING_SNAKE_CASE : int = {}
_SCREAMING_SNAKE_CASE : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowerCAmelCase ( self : str , _A : Tuple):
"""simple docstring"""
if self.remove_space:
_SCREAMING_SNAKE_CASE : Optional[int] = " ".join(inputs.strip().split())
else:
_SCREAMING_SNAKE_CASE : str = inputs
_SCREAMING_SNAKE_CASE : Any = outputs.replace("""``""" , """\"""").replace("""''""" , """\"""")
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : Dict = unicodedata.normalize("""NFKD""" , a_)
_SCREAMING_SNAKE_CASE : int = "".join([c for c in outputs if not unicodedata.combining(a_)])
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Any = outputs.lower()
return outputs
def _lowerCAmelCase ( self : Dict , _A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.preprocess_text(a_)
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.encode(a_ , out_type=a_)
_SCREAMING_SNAKE_CASE : List[str] = []
for piece in pieces:
if len(a_) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , """"""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
_SCREAMING_SNAKE_CASE : int = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(a_)
else:
new_pieces.append(a_)
return new_pieces
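# The loop above works around a SentencePiece quirk: pieces like "9," are
# re-split so the digits and the trailing comma end up in separate tokens,
# matching the original XLNet preprocessing.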
def _lowerCAmelCase ( self : List[Any] , _A : Optional[Any]):
"""simple docstring"""
return self.sp_model.PieceToId(a_)
def _lowerCAmelCase ( self : Optional[Any] , _A : str):
"""simple docstring"""
return self.sp_model.IdToPiece(a_)
def _lowerCAmelCase ( self : List[str] , _A : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = "".join(a_).replace(a_ , """ """).strip()
return out_string
def _lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Any = False , _A : Optional[Any] = None , _A : List[str] = True , **_A : Any , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = kwargs.pop("""use_source_tokenizer""" , a_)
_SCREAMING_SNAKE_CASE : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_SCREAMING_SNAKE_CASE : Optional[int] = []
_SCREAMING_SNAKE_CASE : List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_))
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
sub_texts.append(a_)
else:
current_sub_text.append(a_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_SCREAMING_SNAKE_CASE : Union[str, Any] = "".join(a_)
_SCREAMING_SNAKE_CASE : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_SCREAMING_SNAKE_CASE : List[Any] = self.clean_up_tokenization(a_)
return clean_text
else:
return text
def _lowerCAmelCase ( self : Tuple , _A : Optional[Any] , _A : Tuple = None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self : int , _A : Union[str, Any] , _A : Optional[Any] = None , _A : Optional[int] = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_)
if token_ids_a is not None:
return ([0] * len(a_)) + [1] + ([0] * len(a_)) + [1, 1]
return ([0] * len(a_)) + [1, 1]
def _lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : List[Any] = None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCAmelCase ( self : int , _A : Optional[Any] , _A : int = None):
"""simple docstring"""
if not os.path.isdir(a_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
_SCREAMING_SNAKE_CASE : Any = os.path.join(
a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(a_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , a_)
elif not os.path.isfile(self.vocab_file):
with open(a_ , """wb""") as fi:
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.serialized_model_proto()
fi.write(a_)
return (out_vocab_file,)
| 707 | """simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
    model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(torch_device)
    model.config.eos_token_id = -1
    input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
    greedy_ids = model.generate(input_ids , max_new_tokens=1_0 , do_sample=False)
    greedy_text = tokenizer.decode(greedy_ids[0])
    with CaptureStdout() as cs:
        streamer = TextStreamer(tokenizer)
        model.generate(input_ids , max_new_tokens=1_0 , do_sample=False , streamer=streamer)
    # The greedy text should be printed to stdout, except for the final "\n" in the streamer
    streamer_text = cs.out[:-1]
    self.assertEqual(streamer_text , greedy_text)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
    model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(torch_device)
    model.config.eos_token_id = -1
    input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
    greedy_ids = model.generate(input_ids , max_new_tokens=1_0 , do_sample=False)
    greedy_text = tokenizer.decode(greedy_ids[0])
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
    thread = Thread(target=model.generate , kwargs=generation_kwargs)
    thread.start()
    streamer_text = """"""
    for new_text in streamer:
        streamer_text += new_text
    self.assertEqual(streamer_text , greedy_text)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
    model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(torch_device)
    model.config.eos_token_id = -1
    input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
    greedy_ids = model.generate(input_ids , max_new_tokens=1_0 , do_sample=False)
    new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
    new_greedy_text = tokenizer.decode(new_greedy_ids[0])
    with CaptureStdout() as cs:
        streamer = TextStreamer(tokenizer , skip_prompt=True)
        model.generate(input_ids , max_new_tokens=1_0 , do_sample=False , streamer=streamer)
    # The greedy text should be printed to stdout, except for the final "\n" in the streamer
    streamer_text = cs.out[:-1]
    self.assertEqual(streamer_text , new_greedy_text)
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""distilgpt2""")
    model = AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(torch_device)
    model.config.eos_token_id = -1
    input_ids = torch.ones((1, 5) , device=torch_device).long() * model.config.bos_token_id
    with CaptureStdout() as cs:
        streamer = TextStreamer(tokenizer , skip_special_tokens=True)
        model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer)
    # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
    # re-tokenized, must only contain one token
    streamer_text = cs.out[:-1] # Remove the final "\n"
    streamer_text_tokenized = tokenizer(streamer_text , return_tensors="""pt""")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowerCAmelCase ( self : str):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
    model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(torch_device)
    model.config.eos_token_id = -1
    input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
    streamer = TextIteratorStreamer(tokenizer , timeout=0.001)
    generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
    thread = Thread(target=model.generate , kwargs=generation_kwargs)
    thread.start()
    # The streamer will timeout after 0.001 seconds, so an exception will be raised
    with self.assertRaises(Empty):
        streamer_text = """"""
        for new_text in streamer:
            streamer_text += new_text
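# TextIteratorStreamer pushes decoded text chunks onto an internal queue from
# the generation thread; iterating over it in the main thread blocks until new
# text arrives or the optional timeout expires (raising queue.Empty, as
# asserted above), which is what makes the non-blocking pattern work.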
| 635 | 0 |
"""simple docstring"""
def compute_ap(l )-> None: # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
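# For the adjacency list above, the articulation points printed are 2, 3 and 5:
# removing any one of them disconnects part of the graph.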
| 708 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "facebook/bart-large-mnli"
a = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a = "text_classifier"
a = AutoTokenizer
a = AutoModelForSequenceClassification
a = ["text", ["text"]]
a = ["text"]
def _lowerCAmelCase ( self : int):
"""simple docstring"""
super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail"""):
                self.entailment_id = int(idx)
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")
    def _lowerCAmelCase ( self : Optional[Any] , text , labels):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]):
"""simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
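# Zero-shot classification via NLI: every candidate label becomes the
# hypothesis "This example is {label}", and the label whose premise/hypothesis
# pair scores highest on the entailment logit is returned.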
| 635 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowerCAmelCase_ = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class _snake_case ( lowerCAmelCase__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["input_ids", "attention_mask"]
a = BarthezTokenizer
def __init__( self : int , _A : List[str]=None , _A : Dict=None , _A : str="<s>" , _A : Tuple="</s>" , _A : Any="</s>" , _A : List[Any]="<s>" , _A : Optional[Any]="<unk>" , _A : List[Any]="<pad>" , _A : Union[str, Any]="<mask>" , **_A : List[str] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) else mask_token
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE : Dict = vocab_file
_SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True
def _lowerCAmelCase ( self : List[str] , _A : Any , _A : List[str] = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
_SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
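# BARThez follows the RoBERTa/CamemBERT convention: a single sequence is
# wrapped as <s> A </s>, and a pair as <s> A </s></s> B </s>, with a doubled
# separator between the two segments.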
def _lowerCAmelCase ( self : Tuple , _A : Optional[Any] , _A : Dict = None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] = None):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""")
if not os.path.isdir(_SCREAMING_SNAKE_CASE):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(_SCREAMING_SNAKE_CASE):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
| 709 | """simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str):
"""simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("""nielsr/rvlcdip-demo""")
        image = dataset["""train"""][0]["""image"""].convert("""RGB""")
        inputs = image_processor(image , return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 1_6))
        self.assertEqual(logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4))
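# The checkpoint is fine-tuned on RVL-CDIP (16 document classes), hence the
# (1, 16) logits shape; the slice comparison guards against numerical drift in
# the ported weights.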
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 710 | """simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "M-CLIP"
def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = transformerDimSize
_SCREAMING_SNAKE_CASE : List[str] = imageDimSize
super().__init__(**_A)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = MCLIPConfig
def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict):
"""simple docstring"""
super().__init__(_A , *_A , **_A)
_SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A)
_SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims)
def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(_A), embs
| 635 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 711 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int )-> str:
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426_880 * Decimal(10_005 ).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
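# The loop above evaluates the Chudnovsky series (roughly 14 digits per term):
#   1/pi = 12 * sum_{k>=0} (-1)**k * (6k)! * (13591409 + 545140134*k)
#          / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# which is why ceil(precision / 14) iterations suffice.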
if __name__ == "__main__":
    n = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_fnet'''] = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_fnet_fast'''] = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_fnet'''] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path )-> None:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
# set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
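    # When reset_position_index_per_cell is True, TAPAS restarts token position
    # indices inside every table cell (relative position embeddings) instead of
    # numbering tokens across the flattened table.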
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
elif task == "WTQ":
# run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_46_94
        config.cell_selection_preference = 0.20_79_51
        config.huber_loss_delta = 0.12_11_94
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_35_25_13
        model = TapasForQuestionAnswering(config=config )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.45_19
        config.cell_selection_preference = 0.90_34_21
        config.huber_loss_delta = 2_22.0_88
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_31_41
        model = TapasForQuestionAnswering(config=config )
elif task == "TABFACT":
_SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
elif task == "MLM":
_SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
_SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
        help='''Whether to use relative position embeddings or not. Defaults to False.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 635 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mvp'''] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 | """simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray )-> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray )-> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
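# Rayleigh quotient: R(M, v) = (v* M v) / (v* v). For a Hermitian M the value
# is always real and lies between the smallest and largest eigenvalues of M.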
def tests()-> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 635 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = 'bert'
def __init__( self : List[str] , _A : str=3_0_5_2_2 , _A : Optional[int]=7_6_8 , _A : Tuple=1_2 , _A : int=1_2 , _A : int=3_0_7_2 , _A : Optional[int]="gelu" , _A : Optional[Any]=0.1 , _A : List[Any]=0.1 , _A : Optional[int]=5_1_2 , _A : List[Any]=2 , _A : int=0.02 , _A : List[str]=1e-12 , _A : Dict=0 , _A : List[Any]="absolute" , _A : Tuple=True , _A : Tuple=None , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_)
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = classifier_dropout
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
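# The dynamic axes above tell the ONNX exporter which input dimensions may
# vary at runtime (batch size, sequence length, and the choice axis for
# multiple-choice heads), so the exported graph is not fixed-shape.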
| 714 | """simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float , hole_conc: float , intrinsic_conc: float , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
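# Worked example (illustrative values): with electron_conc=0, hole_conc=100 and
# intrinsic_conc=10, the mass-action law n * p = n_i**2 gives the missing
# electron concentration as 10**2 / 100 = 1.0, so the call
# lowerCamelCase_(0, 100, 10) returns ("electron_conc", 1.0).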
if __name__ == "__main__":
import doctest
doctest.testmod()
| 635 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class _snake_case ( _UpperCamelCase ):
"""simple docstring"""
a = "xlm"
a = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : str , _A : Optional[int]=3_0_1_4_5 , _A : Union[str, Any]=2_0_4_8 , _A : List[Any]=1_2 , _A : Optional[Any]=1_6 , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : List[str]=False , _A : List[Any]=1 , _A : List[Any]=True , _A : Any=5_1_2 , _A : Tuple=2_0_4_8**-0.5 , _A : Dict=1e-12 , _A : Any=0.02 , _A : List[str]=0 , _A : Tuple=1 , _A : Optional[int]=2 , _A : Union[str, Any]=3 , _A : Optional[Any]=5 , _A : List[str]=True , _A : str="first" , _A : Optional[int]=True , _A : Any=None , _A : Tuple=True , _A : Tuple=0.1 , _A : List[str]=5 , _A : List[str]=5 , _A : Dict=0 , _A : Tuple=0 , _A : int=2 , _A : Dict=0 , **_A : Dict , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = emb_dim
_SCREAMING_SNAKE_CASE : int = n_layers
_SCREAMING_SNAKE_CASE : Tuple = n_heads
_SCREAMING_SNAKE_CASE : Optional[int] = dropout
_SCREAMING_SNAKE_CASE : int = attention_dropout
_SCREAMING_SNAKE_CASE : Optional[int] = gelu_activation
_SCREAMING_SNAKE_CASE : Optional[int] = sinusoidal_embeddings
_SCREAMING_SNAKE_CASE : str = causal
_SCREAMING_SNAKE_CASE : Optional[int] = asm
_SCREAMING_SNAKE_CASE : Optional[int] = n_langs
_SCREAMING_SNAKE_CASE : List[str] = use_lang_emb
_SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[Any] = bos_index
_SCREAMING_SNAKE_CASE : int = eos_index
_SCREAMING_SNAKE_CASE : Dict = pad_index
_SCREAMING_SNAKE_CASE : Tuple = unk_index
_SCREAMING_SNAKE_CASE : List[str] = mask_index
_SCREAMING_SNAKE_CASE : Dict = is_encoder
_SCREAMING_SNAKE_CASE : Any = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[Any] = embed_init_std
_SCREAMING_SNAKE_CASE : Any = init_std
_SCREAMING_SNAKE_CASE : List[str] = summary_type
_SCREAMING_SNAKE_CASE : Union[str, Any] = summary_use_proj
_SCREAMING_SNAKE_CASE : Any = summary_activation
_SCREAMING_SNAKE_CASE : List[Any] = summary_proj_to_labels
_SCREAMING_SNAKE_CASE : Union[str, Any] = summary_first_dropout
_SCREAMING_SNAKE_CASE : Tuple = start_n_top
_SCREAMING_SNAKE_CASE : List[Any] = end_n_top
_SCREAMING_SNAKE_CASE : List[str] = mask_token_id
_SCREAMING_SNAKE_CASE : List[str] = lang_id
if "n_words" in kwargs:
_SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs["n_words"]
super().__init__(pad_token_id=_A , bos_token_id=_A , **_A)
class _snake_case ( _UpperCamelCase ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
| 715 | """simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
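# As a quick reference (script file name assumed), a typical launch of this
# example on one or more GPUs would look like:
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16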
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs ),
            """validation""": dataset["""train"""].select(valid_idxs ),
            """test""": dataset["""validation"""],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config , args ):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("""glue""" , """mrpc""" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""" , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print("""Average test metrics from all folds:""" , test_metric )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """ between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """ and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""" , type=int , default=3 , help="""The number of splits to perform across the dataset""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 635 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _snake_case :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=3_2 , n_head=4 , d_head=8 , d_inner=3_7 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=5_1_2 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : int , _A : str , _A : int , _A : str , _A : List[str] , _A : Union[str, Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = TFFunnelModel(config=lowercase_)
_SCREAMING_SNAKE_CASE : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowercase_)
_SCREAMING_SNAKE_CASE : Dict = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Tuple = model(lowercase_)
_SCREAMING_SNAKE_CASE : List[str] = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelModel(config=lowercase_)
_SCREAMING_SNAKE_CASE : str = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : List[str] = TFFunnelModel(config=lowercase_)
_SCREAMING_SNAKE_CASE : Dict = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def _lowerCAmelCase ( self : str , _A : Any , _A : Dict , _A : int , _A : Any , _A : Tuple , _A : List[str] , _A : str , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = TFFunnelBaseModel(config=lowercase_)
_SCREAMING_SNAKE_CASE : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE : Tuple = model(lowercase_)
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : int = model(lowercase_)
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = TFFunnelBaseModel(config=lowercase_)
_SCREAMING_SNAKE_CASE : List[str] = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelBaseModel(config=lowercase_)
_SCREAMING_SNAKE_CASE : Tuple = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def _lowerCAmelCase ( self : Dict , _A : Dict , _A : str , _A : str , _A : Tuple , _A : Optional[int] , _A : str , _A : Optional[Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = TFFunnelForPreTraining(config=lowercase_)
_SCREAMING_SNAKE_CASE : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self : int , _A : int , _A : List[Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : List[Any] , _A : Union[str, Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = TFFunnelForMaskedLM(config=lowercase_)
_SCREAMING_SNAKE_CASE : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE : Tuple = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self : Any , _A : List[str] , _A : Optional[int] , _A : str , _A : int , _A : str , _A : Dict , _A : Union[str, Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelForSequenceClassification(config=lowercase_)
_SCREAMING_SNAKE_CASE : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE : List[str] = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCAmelCase ( self : Optional[int] , _A : List[str] , _A : List[str] , _A : Any , _A : int , _A : Any , _A : Tuple , _A : List[str] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.num_choices
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelForMultipleChoice(config=lowercase_)
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
_SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
_SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
_SCREAMING_SNAKE_CASE : Dict = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : str = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self : Dict , _A : List[Any] , _A : str , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : Optional[int] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
_SCREAMING_SNAKE_CASE : List[Any] = TFFunnelForTokenClassification(config=lowercase_)
_SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE : Any = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self : str , _A : int , _A : Optional[Any] , _A : str , _A : Optional[Any] , _A : Union[str, Any] , _A : List[str] , _A : Optional[Any] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = TFFunnelForQuestionAnswering(config=lowercase_)
_SCREAMING_SNAKE_CASE : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE : Dict = model(lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self : int):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
a = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a = False
a = False
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = TFFunnelModelTester(self)
_SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=lowercase_)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_)
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_)
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
@require_tf
class _snake_case ( __snake_case , unittest.TestCase ):
"""simple docstring"""
a = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a = False
a = False
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = TFFunnelModelTester(self , base=lowercase_)
_SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=lowercase_)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowercase_)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_)
| 716 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 635 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case ( __lowerCAmelCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _lowerCAmelCase ( self : Any , _A : str):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size()) , [])
def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , _A : int , _A : Optional[int] , _A : List[Any] , _A : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = DebertaVaModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
_SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)[0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)[0]
_SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase_)[0]
self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])
def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : int , _A : str , _A : Union[str, Any] , _A : Any , _A : Dict , _A : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = DebertaVaForMaskedLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
_SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self : str , _A : Any , _A : Tuple , _A : str , _A : Union[str, Any] , _A : Any , _A : Optional[int] , _A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Any = DebertaVaForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
_SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
self.check_loss_output(lowerCAmelCase_)
def _lowerCAmelCase ( self : Optional[Any] , _A : List[Any] , _A : Union[str, Any] , _A : str , _A : Tuple , _A : List[str] , _A : Tuple , _A : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = DebertaVaForTokenClassification(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
_SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self : List[Any] , _A : List[Any] , _A : List[str] , _A : int , _A : Union[str, Any] , _A : str , _A : Tuple , _A : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = DebertaVaForQuestionAnswering(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : str , _A : Dict , _A : List[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = DebertaVaForMultipleChoice(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
_SCREAMING_SNAKE_CASE : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_SCREAMING_SNAKE_CASE : Optional[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_SCREAMING_SNAKE_CASE : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_SCREAMING_SNAKE_CASE : List[str] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
a = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a = True
a = False
a = False
a = False
a = False
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = DebertaVaModelTester(self)
_SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_)
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_)
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_)
@slow
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Dict = DebertaVaModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""")
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
pass
@slow
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""")
_SCREAMING_SNAKE_CASE : int = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_)[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4) , f"""{output[:, 1:4, 1:4]}""")
| 717 | """simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """simple docstring"""
    def __init__( self , img , dst_width : int , dst_height : int):
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8) * 2_5_5
        )
    def process( self):
        """simple docstring"""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x( self , x : int):
        """simple docstring"""
        return int(self.ratio_x * x)
    def get_y( self , y : int):
        """simple docstring"""
        return int(self.ratio_y * y)
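# In other words, output pixel (i, j) simply copies source pixel
# (int(src_h / dst_h * i), int(src_w / dst_w * j)); no interpolation is done.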
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('''image_data/lena.jpg''', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
| 635 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = 'conditional_detr'
a = ['past_key_values']
a = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=3_0_0 , encoder_layers=6 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
"""simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return self.d_model
def _lowerCAmelCase ( self : int):
"""simple docstring"""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
class _snake_case ( __snake_case ):
"""simple docstring"""
a = version.parse("1.11" )
@property
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
])
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
return 1e-5
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
return 1_2
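# Usage sketch (upstream class names assumed, since the local classes share the
# `_snake_case` placeholder name):
#   config = ConditionalDetrConfig(backbone="resnet50", dilation=False)
#   config.d_model                    # 256 by default
#   config.to_dict()["model_type"]    # "conditional_detr"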
| 718 | """simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file , class_name , test_name , correct_line , done_test ):
    _id = F"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , """r""" ) as f:
        lines = f.readlines()
    class_regex = F"""class {class_name}("""
    test_regex = F"""{4 * " "}def {test_name}("""
    line_begin_regex = F"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = F"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , """w""" ) as f:
        for line in new_lines:
            f.write(line )
def main(correct , fail=None ):
    if fail is not None:
        with open(fail , """r""" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , """r""" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
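# Each line of the correct-tests file is expected to hold four ";"-separated
# fields (paths and names illustrative):
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected line of code
# The optional failures file lists failing tests as file::class::test, one per line.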
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
lowerCAmelCase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""LayoutLMv2FeatureExtractor"""]
lowerCAmelCase_ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 | """simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase_ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*"""
_SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
_SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
_SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2
_SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_SCREAMING_SNAKE_CASE : Dict = value
_SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3
_SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim]
_SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2]
_SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :]
_SCREAMING_SNAKE_CASE : Dict = query_layer
_SCREAMING_SNAKE_CASE : List[Any] = key_layer
_SCREAMING_SNAKE_CASE : Dict = value_layer
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = value
return model_state_dict
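# Minimal sketch (editorial addition, not part of the conversion script) of
# the fused-qkv split performed above: an `in_proj_weight` of shape
# (3 * dim, dim) is cut into three equal query/key/value blocks. All names
# and sizes below are toy values; `torch` is already imported at the top of
# this file.
_toy_dim = 4
_fused = torch.randn(3 * _toy_dim, _toy_dim)
_qkv_dim = _fused.size(0) // 3
_q, _k, _v = _fused[:_qkv_dim], _fused[_qkv_dim : _qkv_dim * 2], _fused[_qkv_dim * 2 :]
assert _q.shape == _k.shape == _v.shape == (_toy_dim, _toy_dim)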
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
_SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict()
_SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = ClapConfig()
_SCREAMING_SNAKE_CASE : Tuple = enable_fusion
_SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
lowerCAmelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 635 | 0 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '''T5Config'''
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
class _snake_case ( __lowerCamelCase ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
| 720 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , )
assert hasattr(self , """env""")
def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]):
"""simple docstring"""
TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
_SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_SCREAMING_SNAKE_CASE : int = (
Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
| 635 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
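# Editorial note: in the stock transformers pattern this _LazyModule instance
# replaces sys.modules[__name__], so submodules are imported only on first
# attribute access, while the TYPE_CHECKING branch above keeps static type
# checkers and IDEs working against the real symbols.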
| 721 | """simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any:
return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]:
_SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Dict = []
if args.gold_data_mode == "qa":
_SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE )
for answer_list in data[1]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE )
answers.append(__SCREAMING_SNAKE_CASE )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references]
_SCREAMING_SNAKE_CASE : Optional[int] = 0
for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
total += 1
em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = args.k
_SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()]
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict:
def strip_title(__SCREAMING_SNAKE_CASE ):
if title.startswith("""\"""" ):
_SCREAMING_SNAKE_CASE : Optional[int] = title[1:]
if title.endswith("""\"""" ):
_SCREAMING_SNAKE_CASE : str = title[:-1]
return title
_SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device )
_SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0]
_SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever(
__SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
for docs in all_docs:
_SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) )
return provenance_strings
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]:
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
if args.print_predictions:
for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
return answers
def lowerCamelCase_()-> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
_SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
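# Example invocation of this evaluation script (editorial; the checkpoint id
# and file paths are placeholders, not values taken from this repository):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e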
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if args.model_type is None:
_SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
_SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name
if args.index_path is not None:
_SCREAMING_SNAKE_CASE : Any = args.index_path
else:
_SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration
_SCREAMING_SNAKE_CASE : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
_SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
_SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
_SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
_SCREAMING_SNAKE_CASE : str = []
for line in tqdm(__SCREAMING_SNAKE_CASE ):
questions.append(line.strip() )
if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size:
_SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" )
preds_file.flush()
_SCREAMING_SNAKE_CASE : Any = []
if len(__SCREAMING_SNAKE_CASE ) > 0:
_SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
preds_file.flush()
score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCAmelCase_ = get_args()
main(args)
| 635 | 0 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] , _A : int , _A : int=2 , _A : Optional[int]=3 , _A : Any=4 , _A : Optional[Any]=2 , _A : str=7 , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : List[str]=True , _A : List[str]=9_9 , _A : Tuple=3_6 , _A : Union[str, Any]=3 , _A : Optional[int]=4 , _A : List[Any]=3_7 , _A : List[str]="gelu" , _A : Dict=0.1 , _A : List[str]=0.1 , _A : Optional[int]=5_1_2 , _A : Optional[int]=1_6 , _A : Optional[Any]=2 , _A : int=0.02 , _A : Optional[Any]=6 , _A : Optional[int]=6 , _A : Any=3 , _A : Union[str, Any]=4 , _A : Union[str, Any]=None , _A : Dict=1_0_0_0 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
_SCREAMING_SNAKE_CASE : str = image_size
_SCREAMING_SNAKE_CASE : str = patch_size
_SCREAMING_SNAKE_CASE : Any = text_seq_length
_SCREAMING_SNAKE_CASE : List[Any] = is_training
_SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
_SCREAMING_SNAKE_CASE : int = use_labels
_SCREAMING_SNAKE_CASE : List[str] = vocab_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_size
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = intermediate_size
_SCREAMING_SNAKE_CASE : int = hidden_act
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Any = max_position_embeddings
_SCREAMING_SNAKE_CASE : Any = type_vocab_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Tuple = initializer_range
_SCREAMING_SNAKE_CASE : int = coordinate_size
_SCREAMING_SNAKE_CASE : List[Any] = shape_size
_SCREAMING_SNAKE_CASE : str = num_labels
_SCREAMING_SNAKE_CASE : Dict = num_choices
_SCREAMING_SNAKE_CASE : Optional[int] = scope
_SCREAMING_SNAKE_CASE : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_SCREAMING_SNAKE_CASE : Any = text_seq_length
_SCREAMING_SNAKE_CASE : List[Any] = (image_size // patch_size) ** 2 + 1
_SCREAMING_SNAKE_CASE : Optional[int] = self.text_seq_length + self.image_seq_length
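# e.g. with a 4 x 4 image and 2 x 2 patches (illustrative numbers):
# (4 // 2) ** 2 + 1 = 5 visual tokens, so seq_length = text_seq_length + 5.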
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_SCREAMING_SNAKE_CASE : Dict = bbox[i, j, 3]
_SCREAMING_SNAKE_CASE : int = bbox[i, j, 1]
_SCREAMING_SNAKE_CASE : Dict = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_SCREAMING_SNAKE_CASE : Optional[int] = bbox[i, j, 2]
_SCREAMING_SNAKE_CASE : str = bbox[i, j, 0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = t
_SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length])
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
_SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCAmelCase ( self : Any , _A : Tuple , _A : str , _A : List[str] , _A : Any , _A : Optional[int] , _A : List[str] , _A : Any , _A : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
# text + image
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : int = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : List[Any] = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
_SCREAMING_SNAKE_CASE : List[Any] = model(UpperCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
_SCREAMING_SNAKE_CASE : Optional[Any] = model(pixel_values=UpperCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def _lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : Optional[int] , _A : str , _A : Dict , _A : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = LayoutLMvaForSequenceClassification(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCAmelCase ( self : Tuple , _A : Union[str, Any] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
_SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForTokenClassification(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_SCREAMING_SNAKE_CASE : Tuple = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def _lowerCAmelCase ( self : Optional[int] , _A : List[str] , _A : Optional[int] , _A : Tuple , _A : List[Any] , _A : Optional[Any] , _A : Any , _A : List[Any] , _A : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_SCREAMING_SNAKE_CASE : List[str] = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : str = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a = False
a = False
a = False
a = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self : str , _A : int , _A : Optional[int] , _A : List[Any] , _A : List[str] , _A : Tuple):
"""simple docstring"""
return True
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModelTester(self)
_SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7)
def _lowerCAmelCase ( self : str , _A : List[Any] , _A : Optional[int] , _A : Dict=False):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = copy.deepcopy(UpperCAmelCase__)
if model_class in get_values(UpperCAmelCase__):
_SCREAMING_SNAKE_CASE : List[Any] = {
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(UpperCAmelCase__ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase__):
_SCREAMING_SNAKE_CASE : List[str] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
elif model_class in get_values(UpperCAmelCase__):
_SCREAMING_SNAKE_CASE : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
elif model_class in [
*get_values(UpperCAmelCase__),
]:
_SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
elif model_class in [
*get_values(UpperCAmelCase__),
]:
_SCREAMING_SNAKE_CASE : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
return inputs_dict
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE : Optional[Any] = type
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__)
@slow
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def lowerCamelCase_()-> List[str]:
_SCREAMING_SNAKE_CASE : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""").to(UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
_SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""").pixel_values.to(UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : Any = torch.tensor([[1, 2]])
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
_SCREAMING_SNAKE_CASE : int = model(
input_ids=input_ids.to(UpperCAmelCase__) , bbox=bbox.to(UpperCAmelCase__) , pixel_values=pixel_values.to(UpperCAmelCase__) , )
# verify the logits
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_9_9, 7_6_8))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__)
_SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]]).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1e-4))
| 700 | """simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]:
set_seed(3 )
# generate train_data and objective_set
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
_SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE )
print("""computing perplexity on objective set""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item()
print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]:
set_seed(42 )
# Load pre-trained model
_SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE )
# Train secondary learner
_SCREAMING_SNAKE_CASE : Any = train_secondary_learner(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(__SCREAMING_SNAKE_CASE )
secondary_learner.eval()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : int = []
# Compute the performance of the transformer model at the beginning
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
for epoch in range(int(__SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(__SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 )
_SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = True
if secondary_learner is not None:
_SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward(
torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
_SCREAMING_SNAKE_CASE : List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def lowerCamelCase_()-> Tuple:
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_SCREAMING_SNAKE_CASE : int = training_secondary_learner(
__SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 635 | 0 |
"""simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 1_000_000 )-> Any:
_SCREAMING_SNAKE_CASE : int = 1
_SCREAMING_SNAKE_CASE : Optional[int] = 1
_SCREAMING_SNAKE_CASE : Optional[int] = {1: 1}
for inputa in range(2 , _lowerCAmelCase ):
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_SCREAMING_SNAKE_CASE : int = (3 * number) + 1
counter += 1
if inputa not in counters:
_SCREAMING_SNAKE_CASE : Optional[Any] = counter
if counter > pre_counter:
_SCREAMING_SNAKE_CASE : Union[str, Any] = inputa
_SCREAMING_SNAKE_CASE : Any = counter
return largest_number
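# Illustrative check (editorial) of the memoised counting above: the Collatz
# chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e.
# 10 terms, so the chain length cached for 13 should be 10.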
if __name__ == "__main__":
print(solution(int(input().strip())))
| 701 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( __snake_case ):
"""simple docstring"""
a = ["image_processor", "tokenizer"]
a = "ChineseCLIPImageProcessor"
a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
_SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""")
_SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_A , _A)
_SCREAMING_SNAKE_CASE : Dict = self.image_processor
def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A)
if images is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A)
if text is not None and images is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A) , tensor_type=_A)
def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A)
def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any):
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A)
@property
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , )
return self.image_processor_class
| 635 | 0 |
"""simple docstring"""
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]:
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = sum(lowerCamelCase_ ) / len(lowerCamelCase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowerCamelCase_ )
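# Illustrative usage (editorial): for [1, 2, 3, 4] the mean is 2.5 and the
# absolute deviations are 1.5, 0.5, 0.5, 1.5, so the result is 1.0.
assert lowerCamelCase_([1, 2, 3, 4]) == 1.0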
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 | """simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ['''model.decoder.embed_positions.weights''']
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]:
if "emb" in name:
_SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
_SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
_SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
_SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
_SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
_SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]:
_SCREAMING_SNAKE_CASE : str = list(state_dict.keys() )
_SCREAMING_SNAKE_CASE : Tuple = {}
for key in keys:
_SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
_SCREAMING_SNAKE_CASE : str = val[:hidden_size, :]
_SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_SCREAMING_SNAKE_CASE : int = val
else:
_SCREAMING_SNAKE_CASE : Dict = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
_SCREAMING_SNAKE_CASE : str = 24
_SCREAMING_SNAKE_CASE : Any = 16
elif checkpoint == "medium":
_SCREAMING_SNAKE_CASE : Dict = 1_536
_SCREAMING_SNAKE_CASE : Union[str, Any] = 48
_SCREAMING_SNAKE_CASE : Optional[Any] = 24
elif checkpoint == "large":
_SCREAMING_SNAKE_CASE : List[Any] = 2_048
_SCREAMING_SNAKE_CASE : Optional[int] = 48
_SCREAMING_SNAKE_CASE : str = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig(
hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , num_attention_heads=__SCREAMING_SNAKE_CASE , )
return config
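# Summary of the mapping above (derived directly from the branches):
#   small  -> hidden 1024, ffn 4096, 24 layers, 16 heads
#   medium -> hidden 1536, ffn 6144, 48 layers, 24 heads
#   large  -> hidden 2048, ffn 8192, 48 layers, 32 heads
# In every case ffn_dim is 4 * hidden_size.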
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
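
# Example invocation (script filename and output path are illustrative):
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu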
| 635 | 0 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
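
# Example invocation (flag value is illustrative):
#   accelerate config --config_file ./default_config.yaml
# or, running this module directly:
#   python config.py --config_file ./default_config.yaml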
| 703 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW model."""

    model_type = "sew"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
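
# Minimal usage sketch (values shown follow the defaults above):
#   config = SEWConfig()
#   config.inputs_to_logits_ratio  # product of the conv strides: 5 * 2**6 = 320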
| 635 | 0 |