code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Any = filter(lambda __lowercase : p.requires_grad , model.parameters() )
UpperCAmelCase_ : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCamelCase : Dict = logging.getLogger(__name__)
def snake_case_ ( __lowercase , __lowercase ):
if metric == "rouge2":
UpperCAmelCase_ : List[Any] = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
UpperCAmelCase_ : str = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
UpperCAmelCase_ : Optional[Any] = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
UpperCAmelCase_ : Union[str, Any] = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
''' function.''' )
UpperCAmelCase_ : Optional[int] = ModelCheckpoint(
dirpath=__lowercase , filename=__lowercase , monitor=F'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def snake_case_ ( __lowercase , __lowercase ):
return EarlyStopping(
monitor=F'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__lowercase , verbose=__lowercase , )
class lowerCAmelCase__( pl.Callback ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__snake_case )
@rank_zero_only
def _lowerCamelCase ( self : Tuple , __snake_case : pl.Trainer , __snake_case : pl.LightningModule , __snake_case : str , __snake_case : Optional[Any]=True ):
'''simple docstring'''
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCAmelCase_ : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
UpperCAmelCase_ : int = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : Optional[Any] = od / '''test_results.txt'''
UpperCAmelCase_ : Any = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCAmelCase_ : Tuple = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=__snake_case )
generations_file.parent.mkdir(exist_ok=__snake_case )
with open(__snake_case , '''a+''' ) as writer:
for key in sorted(__snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : str = metrics[key]
if isinstance(__snake_case , torch.Tensor ):
UpperCAmelCase_ : Dict = val.item()
UpperCAmelCase_ : Optional[int] = f'''{key}: {val:.6f}\n'''
writer.write(__snake_case )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : Union[str, Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(__snake_case )
@rank_zero_only
def _lowerCamelCase ( self : Any , __snake_case : Tuple , __snake_case : List[str] ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[str] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Union[str, Any] = pl_module.model.num_parameters()
UpperCAmelCase_ : Tuple = count_trainable_parameters(__snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def _lowerCamelCase ( self : List[str] , __snake_case : pl.Trainer , __snake_case : pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__snake_case , __snake_case , '''test''' )
@rank_zero_only
def _lowerCamelCase ( self : Any , __snake_case : pl.Trainer , __snake_case : Optional[int] ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 720
|
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 641
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : int = logging.get_logger()
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : nn.Module
A_ : List[nn.Module] = field(default_factory=snake_case__ )
A_ : list = field(default_factory=snake_case__ )
def _lowerCamelCase ( self : List[Any] , __snake_case : Any , __snake_case : Tensor , __snake_case : Tensor ):
'''simple docstring'''
UpperCAmelCase_ : Any = len(list(m.modules() ) ) == 1 or isinstance(__snake_case , nn.Convad ) or isinstance(__snake_case , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__snake_case )
def __call__( self : Union[str, Any] , __snake_case : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return list(filter(lambda __snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : nn.Module
A_ : nn.Module
A_ : int = 1
A_ : List = field(default_factory=snake_case__ )
A_ : List = field(default_factory=snake_case__ )
A_ : bool = True
def __call__( self : Tuple , __snake_case : Tensor ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = Tracker(self.dest )(__snake_case ).parametrized
UpperCAmelCase_ : Union[str, Any] = Tracker(self.src )(__snake_case ).parametrized
UpperCAmelCase_ : List[Any] = list(filter(lambda __snake_case : type(__snake_case ) not in self.src_skip , __snake_case ) )
UpperCAmelCase_ : int = list(filter(lambda __snake_case : type(__snake_case ) not in self.dest_skip , __snake_case ) )
if len(__snake_case ) != len(__snake_case ) and self.raise_if_mismatch:
raise Exception(
f'''Numbers of operations are different. Source module has {len(__snake_case )} operations while'''
f''' destination module has {len(__snake_case )}.''' )
for dest_m, src_m in zip(__snake_case , __snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : nn.Module ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f'''Unexpected layer name {k}'''
UpperCAmelCase_ : Union[str, Any] = len(__snake_case ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
UpperCAmelCase_ : List[Any] = nn.ModuleDict(__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Tensor ):
'''simple docstring'''
return get_trunk_forward_outputs(
__snake_case , out_feat_keys=__snake_case , feature_blocks=self._feature_blocks , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def _lowerCamelCase ( self : Tuple , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : str = x.split('''-''' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Optional[Any] , __snake_case : str ):
'''simple docstring'''
# default to timm!
if x not in self:
UpperCAmelCase_ : Optional[Any] = self.convert_name_to_timm(__snake_case )
UpperCAmelCase_ : str = partial(lambda: (timm.create_model(__snake_case , pretrained=__snake_case ).eval(), None) )
else:
UpperCAmelCase_ : List[Any] = super().__getitem__(__snake_case )
return val
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __getitem__( self : int , __snake_case : str ):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
UpperCAmelCase_ : List[str] = RegNetModel
else:
UpperCAmelCase_ : int = RegNetForImageClassification
return val
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
for from_key, to_key in keys:
UpperCAmelCase_ : Dict = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ):
print(F'''Converting {name}...''' )
with torch.no_grad():
UpperCAmelCase_ : int = from_model_func()
UpperCAmelCase_ : Tuple = our_model_func(__lowercase ).eval()
UpperCAmelCase_ : Any = ModuleTransfer(src=__lowercase , dest=__lowercase , raise_if_mismatch=__lowercase )
UpperCAmelCase_ : Union[str, Any] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(__lowercase )
if from_state_dict is not None:
UpperCAmelCase_ : List[Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase_ : Union[str, Any] = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
UpperCAmelCase_ : Dict = manually_copy_vissl_head(__lowercase , our_model.state_dict() , __lowercase )
our_model.load_state_dict(__lowercase )
UpperCAmelCase_ : Any = our_model(__lowercase , output_hidden_states=__lowercase )
UpperCAmelCase_ : List[Any] = (
our_outputs.logits if isinstance(__lowercase , __lowercase ) else our_outputs.last_hidden_state
)
UpperCAmelCase_ : Tuple = from_model(__lowercase )
UpperCAmelCase_ : Optional[Any] = from_output[-1] if type(__lowercase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase_ : Optional[int] = our_outputs.hidden_states[-1]
assert torch.allclose(__lowercase , __lowercase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=__lowercase , )
UpperCAmelCase_ : Tuple = 2_2_4 if '''seer''' not in name else 3_8_4
# we can use the convnext one
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=__lowercase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , )
print(F'''Pushed {name}''' )
def snake_case_ ( __lowercase , __lowercase = None , __lowercase = True ):
UpperCAmelCase_ : Optional[Any] = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ : int = 1_0_0_0
UpperCAmelCase_ : Union[str, Any] = (1, num_labels)
UpperCAmelCase_ : str = '''huggingface/label-files'''
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : Optional[int] = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase_ : Any = {int(__lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[Any] = idalabel
UpperCAmelCase_ : Any = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Dict = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase )
UpperCAmelCase_ : Any = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
UpperCAmelCase_ : List[Any] = NameToOurModelFuncMap()
UpperCAmelCase_ : Optional[int] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__lowercase , __lowercase ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase_ : str = torch.hub.load_state_dict_from_url(__lowercase , model_dir=str(__lowercase ) , map_location='''cpu''' )
UpperCAmelCase_ : Tuple = model_func()
# check if we have a head, if yes add it
UpperCAmelCase_ : Tuple = files['''classy_state_dict''']['''base_model''']['''model''']
UpperCAmelCase_ : Optional[int] = model_state_dict['''trunk''']
model.load_state_dict(__lowercase )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase_ : List[Any] = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : Optional[int] = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : Optional[int] = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ : Union[str, Any] = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
UpperCAmelCase_ : str = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : List[Any] = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : int = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ : List[Any] = partial(
__lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowercase , __lowercase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowercase , __lowercase , __lowercase , )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
__UpperCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 721
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 641
| 0
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case_ ( ):
UpperCAmelCase_ : str = HfArgumentParser(__lowercase )
UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=__lowercase )
try:
UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
UpperCAmelCase_ : List[str] = ''' '''.join(str(__lowercase ).split(''' ''' )[:-1] )
UpperCAmelCase_ : Optional[int] = ''''''
UpperCAmelCase_ : Dict = eval(str(__lowercase ).split(''' ''' )[-1] )
UpperCAmelCase_ : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowercase )
if len(__lowercase ) > 0:
UpperCAmelCase_ : Tuple = full_error_msg + begin_error_msg + str(__lowercase )
raise ValueError(__lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 700
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
| 641
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase__( unittest.TestCase , snake_case__ ):
'''simple docstring'''
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Any = load_tool('''text-classification''' )
self.tool.setup()
UpperCAmelCase_ : str = load_tool('''text-classification''' , remote=__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(__snake_case , '''positive''' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(__snake_case , '''positive''' )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(__snake_case , '''positive''' )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(__snake_case , '''positive''' )
| 701
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case_ ( ):
UpperCAmelCase_ : str = HfArgumentParser(__lowercase )
UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=__lowercase )
try:
UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
UpperCAmelCase_ : List[str] = ''' '''.join(str(__lowercase ).split(''' ''' )[:-1] )
UpperCAmelCase_ : Optional[int] = ''''''
UpperCAmelCase_ : Dict = eval(str(__lowercase ).split(''' ''' )[-1] )
UpperCAmelCase_ : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowercase )
if len(__lowercase ) > 0:
UpperCAmelCase_ : Tuple = full_error_msg + begin_error_msg + str(__lowercase )
raise ValueError(__lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 641
| 0
|
from math import pi
def snake_case_ ( __lowercase , __lowercase ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 702
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 641
| 0
|
from math import factorial, pi
def snake_case_ ( __lowercase , __lowercase = 3_0 ):
if not isinstance(__lowercase , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(__lowercase , __lowercase ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
UpperCAmelCase_ : Dict = float(__lowercase )
UpperCAmelCase_ : str = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__lowercase ) )
def snake_case_ ( __lowercase , __lowercase = 3_0 ):
if not isinstance(__lowercase , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(__lowercase , __lowercase ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
UpperCAmelCase_ : int = float(__lowercase )
UpperCAmelCase_ : List[str] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 703
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
print(F'''Save vocab file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641
| 0
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
# General docstring
__UpperCamelCase : Any = 'RegNetConfig'
# Base docstring
__UpperCamelCase : int = 'facebook/regnet-y-040'
__UpperCamelCase : int = [1, 1088, 7, 7]
# Image classification docstring
__UpperCamelCase : str = 'facebook/regnet-y-040'
__UpperCamelCase : int = 'tabby, tabby cat'
__UpperCamelCase : Optional[Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int = 3 , __snake_case : int = 1 , __snake_case : int = 1 , __snake_case : Optional[str] = "relu" , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCAmelCase_ : str = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCAmelCase_ : Optional[int] = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=__snake_case , strides=__snake_case , padding='''VALID''' , groups=__snake_case , use_bias=__snake_case , name='''convolution''' , )
UpperCAmelCase_ : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
UpperCAmelCase_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.convolution(self.padding(__snake_case ) )
UpperCAmelCase_ : List[Any] = self.normalization(__snake_case )
UpperCAmelCase_ : List[Any] = self.activation(__snake_case )
return hidden_state
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Dict , __snake_case : RegNetConfig , **__snake_case : Optional[int] ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Any = config.num_channels
UpperCAmelCase_ : Tuple = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = shape_list(__snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCAmelCase_ : Dict = tf.transpose(__snake_case , perm=(0, 2, 3, 1) )
UpperCAmelCase_ : Tuple = self.embedder(__snake_case )
return hidden_state
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , __snake_case : int , __snake_case : int = 2 , **__snake_case : int ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : str = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=1 , strides=__snake_case , use_bias=__snake_case , name='''convolution''' )
UpperCAmelCase_ : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def _lowerCamelCase ( self : Any , __snake_case : tf.Tensor , __snake_case : bool = False ):
'''simple docstring'''
return self.normalization(self.convolution(__snake_case ) , training=__snake_case )
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , __snake_case : int , __snake_case : int , **__snake_case : Optional[Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name='''pooler''' )
UpperCAmelCase_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.pooler(__snake_case )
for layer_module in self.attention:
UpperCAmelCase_ : List[str] = layer_module(__snake_case )
UpperCAmelCase_ : Any = hidden_state * pooled
return hidden_state
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : int , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Tuple = in_channels != out_channels or stride != 1
UpperCAmelCase_ : int = max(1 , out_channels // config.groups_width )
UpperCAmelCase_ : List[Any] = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCAmelCase_ : List[Any] = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name='''layer.2''' ),
]
UpperCAmelCase_ : List[Any] = ACTaFN[config.hidden_act]
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Any = hidden_state
for layer_module in self.layers:
UpperCAmelCase_ : List[str] = layer_module(__snake_case )
UpperCAmelCase_ : Union[str, Any] = self.shortcut(__snake_case )
hidden_state += residual
UpperCAmelCase_ : Union[str, Any] = self.activation(__snake_case )
return hidden_state
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : str , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 , **__snake_case : int ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : List[str] = in_channels != out_channels or stride != 1
UpperCAmelCase_ : List[Any] = max(1 , out_channels // config.groups_width )
UpperCAmelCase_ : Union[str, Any] = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
UpperCAmelCase_ : Optional[int] = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name='''layer.3''' ),
]
UpperCAmelCase_ : List[str] = ACTaFN[config.hidden_act]
def _lowerCamelCase ( self : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = hidden_state
for layer_module in self.layers:
UpperCAmelCase_ : List[str] = layer_module(__snake_case )
UpperCAmelCase_ : Any = self.shortcut(__snake_case )
hidden_state += residual
UpperCAmelCase_ : List[str] = self.activation(__snake_case )
return hidden_state
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 2 , __snake_case : int = 2 , **__snake_case : int ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[Any] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
UpperCAmelCase_ : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(__snake_case , __snake_case , __snake_case , stride=__snake_case , name='''layers.0''' ),
*[layer(__snake_case , __snake_case , __snake_case , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowerCamelCase ( self : int , __snake_case : List[Any] ):
'''simple docstring'''
for layer_module in self.layers:
UpperCAmelCase_ : Tuple = layer_module(__snake_case )
return hidden_state
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple , __snake_case : RegNetConfig , **__snake_case : Any ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
UpperCAmelCase_ : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case , name=f'''stages.{i+1}''' ) )
def _lowerCamelCase ( self : Dict , __snake_case : tf.Tensor , __snake_case : bool = False , __snake_case : bool = True ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase_ : Optional[Any] = hidden_states + (hidden_state,)
UpperCAmelCase_ : Union[str, Any] = stage_module(__snake_case )
if output_hidden_states:
UpperCAmelCase_ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
@keras_serializable
class lowerCAmelCase__( tf.keras.layers.Layer ):
'''simple docstring'''
A_ : Dict = RegNetConfig
def __init__( self : int , __snake_case : List[str] , **__snake_case : List[Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : List[Any] = config
UpperCAmelCase_ : int = TFRegNetEmbeddings(__snake_case , name='''embedder''' )
UpperCAmelCase_ : Union[str, Any] = TFRegNetEncoder(__snake_case , name='''encoder''' )
UpperCAmelCase_ : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name='''pooler''' )
@unpack_inputs
def _lowerCamelCase ( self : str , __snake_case : tf.Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Union[str, Any] = self.embedder(__snake_case , training=__snake_case )
UpperCAmelCase_ : Tuple = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
UpperCAmelCase_ : Any = encoder_outputs[0]
UpperCAmelCase_ : Union[str, Any] = self.pooler(__snake_case )
# Change to NCHW output format have uniformity in the modules
UpperCAmelCase_ : str = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
UpperCAmelCase_ : Dict = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCAmelCase_ : Tuple = tuple([tf.transpose(__snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[Any] = RegNetConfig
A_ : Dict = 'regnet'
A_ : str = 'pixel_values'
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__UpperCamelCase : List[Any] = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCamelCase : str = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , snake_case__ , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : RegNetConfig , *__snake_case : Union[str, Any] , **__snake_case : List[str] ):
'''simple docstring'''
super().__init__(__snake_case , *__snake_case , **__snake_case )
UpperCAmelCase_ : str = TFRegNetMainLayer(__snake_case , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self : List[str] , __snake_case : tf.Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : Tuple=False , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Dict = self.regnet(
pixel_values=__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , snake_case__ , )
class lowerCAmelCase__( snake_case__ , snake_case__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : RegNetConfig , *__snake_case : List[str] , **__snake_case : Optional[int] ):
'''simple docstring'''
super().__init__(__snake_case , *__snake_case , **__snake_case )
UpperCAmelCase_ : Any = config.num_labels
UpperCAmelCase_ : List[Any] = TFRegNetMainLayer(__snake_case , name='''regnet''' )
# classification head
UpperCAmelCase_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self : Tuple , __snake_case : tf.Tensor = None , __snake_case : tf.Tensor = None , __snake_case : bool = None , __snake_case : bool = None , __snake_case : Tuple=False , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : str = self.regnet(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
UpperCAmelCase_ : List[str] = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase_ : List[Any] = self.classifier[0](__snake_case )
UpperCAmelCase_ : Optional[int] = self.classifier[1](__snake_case )
UpperCAmelCase_ : Optional[Any] = None if labels is None else self.hf_compute_loss(labels=__snake_case , logits=__snake_case )
if not return_dict:
UpperCAmelCase_ : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
| 704
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
| 641
| 0
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = RoFormerTokenizer
A_ : int = RoFormerTokenizerFast
A_ : Dict = True
A_ : Optional[Any] = True
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
def _lowerCamelCase ( self : int , **__snake_case : Any ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__snake_case )
def _lowerCamelCase ( self : List[str] , **__snake_case : str ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__snake_case )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''永和服装饰品有限公司,今天天气非常好'''
UpperCAmelCase_ : List[str] = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_chinese_input_output_texts()
UpperCAmelCase_ : int = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
UpperCAmelCase_ : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Union[str, Any] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_chinese_input_output_texts()
UpperCAmelCase_ : List[str] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
UpperCAmelCase_ : str = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[List[PIL.Image.Image], np.ndarray]
A_ : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 706
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
| 641
| 0
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__UpperCamelCase : List[Any] = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCamelCase : str = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCamelCase : List[str] = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCamelCase : str = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
__UpperCamelCase : Union[str, Any] = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
__UpperCamelCase : Optional[Any] = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def snake_case_ ( __lowercase ):
if isinstance(__lowercase , __lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('''boolean value expected''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=False ):
UpperCAmelCase_ : Dict = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
UpperCAmelCase_ : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
UpperCAmelCase_ : List[Any] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
UpperCAmelCase_ : Tuple = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
UpperCAmelCase_ : Tuple = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
UpperCAmelCase_ : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
UpperCAmelCase_ : List[str] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
UpperCAmelCase_ : Any = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
UpperCAmelCase_ : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
UpperCAmelCase_ : int = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
UpperCAmelCase_ : List[str] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
UpperCAmelCase_ : int = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None ):
UpperCAmelCase_ : Optional[int] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
UpperCAmelCase_ : List[str] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
UpperCAmelCase_ : List[Any] = checkpoint[F'''{old_prefix}.norm.weight''']
UpperCAmelCase_ : Tuple = checkpoint[F'''{old_prefix}.norm.bias''']
UpperCAmelCase_ : Any = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ : str = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ : Union[str, Any] = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ : Optional[int] = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ : str = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ : Any = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : int = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : Optional[int] = checkpoint['''time_embed.0.weight''']
UpperCAmelCase_ : Any = checkpoint['''time_embed.0.bias''']
UpperCAmelCase_ : int = checkpoint['''time_embed.2.weight''']
UpperCAmelCase_ : str = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ : Any = checkpoint['''label_emb.weight''']
UpperCAmelCase_ : Optional[Any] = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase_ : List[Any] = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase_ : List[Any] = unet_config['''down_block_types''']
UpperCAmelCase_ : Tuple = unet_config['''layers_per_block''']
UpperCAmelCase_ : int = unet_config['''attention_head_dim''']
UpperCAmelCase_ : List[Any] = unet_config['''block_out_channels''']
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Union[str, Any] = channels_list[0]
for i, layer_type in enumerate(__lowercase ):
UpperCAmelCase_ : Optional[int] = channels_list[i]
UpperCAmelCase_ : Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__lowercase ):
UpperCAmelCase_ : List[Any] = F'''down_blocks.{i}.resnets.{j}'''
UpperCAmelCase_ : Tuple = F'''input_blocks.{current_layer}.0'''
UpperCAmelCase_ : int = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ : Any = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__lowercase ):
UpperCAmelCase_ : Dict = F'''down_blocks.{i}.resnets.{j}'''
UpperCAmelCase_ : int = F'''input_blocks.{current_layer}.0'''
UpperCAmelCase_ : Optional[int] = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ : Tuple = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
UpperCAmelCase_ : Optional[Any] = F'''down_blocks.{i}.attentions.{j}'''
UpperCAmelCase_ : Optional[Any] = F'''input_blocks.{current_layer}.1'''
UpperCAmelCase_ : List[Any] = convert_attention(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
current_layer += 1
if i != len(__lowercase ) - 1:
UpperCAmelCase_ : Any = F'''down_blocks.{i}.downsamplers.0'''
UpperCAmelCase_ : str = F'''input_blocks.{current_layer}.0'''
UpperCAmelCase_ : List[str] = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
current_layer += 1
UpperCAmelCase_ : str = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ : Optional[int] = '''mid_block.resnets.0'''
UpperCAmelCase_ : List[str] = '''middle_block.0'''
UpperCAmelCase_ : List[Any] = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : Optional[Any] = '''mid_block.attentions.0'''
UpperCAmelCase_ : int = '''middle_block.1'''
UpperCAmelCase_ : List[Any] = convert_attention(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : Optional[Any] = '''mid_block.resnets.1'''
UpperCAmelCase_ : Union[str, Any] = '''middle_block.2'''
UpperCAmelCase_ : Optional[Any] = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = unet_config['''up_block_types''']
for i, layer_type in enumerate(__lowercase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ : Tuple = F'''up_blocks.{i}.resnets.{j}'''
UpperCAmelCase_ : List[str] = F'''output_blocks.{current_layer}.0'''
UpperCAmelCase_ : Optional[int] = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
current_layer += 1
if i != len(__lowercase ) - 1:
UpperCAmelCase_ : Optional[Any] = F'''up_blocks.{i}.upsamplers.0'''
UpperCAmelCase_ : Tuple = F'''output_blocks.{current_layer-1}.1'''
UpperCAmelCase_ : Tuple = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ : Tuple = F'''up_blocks.{i}.resnets.{j}'''
UpperCAmelCase_ : Tuple = F'''output_blocks.{current_layer}.0'''
UpperCAmelCase_ : Optional[Any] = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
UpperCAmelCase_ : Optional[Any] = F'''up_blocks.{i}.attentions.{j}'''
UpperCAmelCase_ : Union[str, Any] = F'''output_blocks.{current_layer}.1'''
UpperCAmelCase_ : Union[str, Any] = convert_attention(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
current_layer += 1
if i != len(__lowercase ) - 1:
UpperCAmelCase_ : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
UpperCAmelCase_ : Tuple = F'''output_blocks.{current_layer-1}.2'''
UpperCAmelCase_ : Dict = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : str = checkpoint['''out.0.weight''']
UpperCAmelCase_ : Optional[Any] = checkpoint['''out.0.bias''']
UpperCAmelCase_ : Union[str, Any] = checkpoint['''out.2.weight''']
UpperCAmelCase_ : int = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
__UpperCamelCase : str = parser.parse_args()
__UpperCamelCase : Optional[Any] = strabool(args.class_cond)
__UpperCamelCase : Optional[Any] = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
__UpperCamelCase : Dict = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCamelCase : Optional[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__UpperCamelCase : Optional[Any] = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
__UpperCamelCase : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__UpperCamelCase : Optional[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__UpperCamelCase : List[str] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCamelCase : Optional[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
__UpperCamelCase : List[str] = CMStochasticIterativeScheduler(**scheduler_config)
__UpperCamelCase : List[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 707
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
| 641
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
UpperCAmelCase_ : Optional[Any] = TOKENIZER_CLASSES
else:
UpperCAmelCase_ : Any = {tokenizer_name: getattr(__lowercase , tokenizer_name + '''Fast''' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
UpperCAmelCase_ : str = TOKENIZER_CLASSES[tokenizer_name]
UpperCAmelCase_ : str = True
if checkpoint_name is None:
UpperCAmelCase_ : int = list(tokenizer_class.max_model_input_sizes.keys() )
else:
UpperCAmelCase_ : List[str] = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
UpperCAmelCase_ : List[str] = tokenizer_class.from_pretrained(__lowercase , force_download=__lowercase )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCAmelCase_ : Optional[Any] = checkpoint.split('''/''' )
UpperCAmelCase_ : List[Any] = os.path.join(__lowercase , __lowercase )
elif add_prefix:
UpperCAmelCase_ : Union[str, Any] = checkpoint
UpperCAmelCase_ : Tuple = dump_path
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCAmelCase_ : Dict = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCAmelCase_ : Optional[Any] = file_path.split(__lowercase )[-1][0]
if next_char == "/":
UpperCAmelCase_ : Union[str, Any] = os.path.join(__lowercase , __lowercase )
UpperCAmelCase_ : Union[str, Any] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
UpperCAmelCase_ : Optional[int] = tokenizer.save_pretrained(
__lowercase , legacy_format=__lowercase , filename_prefix=__lowercase )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(__lowercase )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 708
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 641
| 0
|
def snake_case_ ( __lowercase , __lowercase , __lowercase=False ):
if isinstance(__lowercase , __lowercase ) and isinstance(__lowercase , __lowercase ):
UpperCAmelCase_ : Tuple = len(set_a.intersection(__lowercase ) )
if alternative_union:
UpperCAmelCase_ : List[Any] = len(__lowercase ) + len(__lowercase )
else:
UpperCAmelCase_ : Tuple = len(set_a.union(__lowercase ) )
return intersection / union
if isinstance(__lowercase , (list, tuple) ) and isinstance(__lowercase , (list, tuple) ):
UpperCAmelCase_ : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
UpperCAmelCase_ : Tuple = len(__lowercase ) + len(__lowercase )
return len(__lowercase ) / union
else:
UpperCAmelCase_ : Union[str, Any] = set_a + [element for element in set_b if element not in set_a]
return len(__lowercase ) / len(__lowercase )
return len(__lowercase ) / len(__lowercase )
return None
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = {'a', 'b', 'c', 'd', 'e'}
__UpperCamelCase : Optional[Any] = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 709
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
| 641
| 0
|
def snake_case_ ( __lowercase = 1 , __lowercase = 1_0_0_0 ):
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : str = 0
for divide_by_number in range(__lowercase , digit + 1 ):
UpperCAmelCase_ : list[int] = []
UpperCAmelCase_ : int = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__lowercase ):
UpperCAmelCase_ : Union[str, Any] = len(__lowercase )
UpperCAmelCase_ : str = divide_by_number
else:
has_been_divided.append(__lowercase )
UpperCAmelCase_ : Union[str, Any] = now_divide * 1_0 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Union[str, Any] = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
| 641
| 0
|
def snake_case_ ( __lowercase , __lowercase ) -> Tuple:
return int((input_a, input_a).count(0 ) == 0 )
def snake_case_ ( ) -> Optional[int]:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
| 641
| 0
|
'''simple docstring'''
import string
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[int] = ''''''
for i in sequence:
UpperCAmelCase_ : Optional[Any] = ord(__lowercase )
if 6_5 <= extract <= 9_0:
output += chr(1_5_5 - extract )
elif 9_7 <= extract <= 1_2_2:
output += chr(2_1_9 - extract )
else:
output += i
return output
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = string.ascii_letters
UpperCAmelCase_ : int = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(__lowercase )] if c in letters else c for c in sequence )
def snake_case_ ( ):
from timeit import timeit
print('''Running performance benchmarks...''' )
UpperCAmelCase_ : int = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=__lowercase )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' , setup=__lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'{example} encrypted in atbash: {atbash(example)}')
benchmark()
| 713
|
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 641
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Dict = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 714
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 641
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Dict = 13
UpperCAmelCase_ : Any = 7
UpperCAmelCase_ : str = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : List[Any] = 99
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[int] = 32
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : List[Any] = 0.1
UpperCAmelCase_ : Union[str, Any] = 0.1
UpperCAmelCase_ : Union[str, Any] = 512
UpperCAmelCase_ : Any = 16
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : Union[str, Any] = 0.02
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : Any = '''last'''
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Optional[Any] = 0
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCAmelCase_ : int = None
if self.use_input_lengths:
UpperCAmelCase_ : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Union[str, Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : str , __snake_case : List[Any] , __snake_case : Tuple , ):
'''simple docstring'''
UpperCAmelCase_ : Any = TFFlaubertModel(config=__snake_case )
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : str = [input_ids, input_mask]
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , __snake_case : Optional[int] , __snake_case : int , __snake_case : Any , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(__snake_case )
UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : str = TFFlaubertForQuestionAnsweringSimple(__snake_case )
UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(__snake_case )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Tuple , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Dict , __snake_case : List[Any] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : Tuple = TFFlaubertForTokenClassification(config=__snake_case )
UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.num_choices
UpperCAmelCase_ : List[str] = TFFlaubertForMultipleChoice(config=__snake_case )
UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : Optional[int] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : List[Any] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Dict = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
UpperCAmelCase_
) : Optional[int] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : str = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Any = False
A_ : Any = False
def _lowerCamelCase ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = TFFlaubertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__snake_case )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__snake_case )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = TFFlaubertModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
UpperCAmelCase_ : int = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCAmelCase_ : Any = model(__snake_case )[0]
UpperCAmelCase_ : Dict = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __snake_case )
# compare the actual values for a slice.
UpperCAmelCase_ : List[str] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 715
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__snake_case , '''width_multiplier''' ) )
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : int=13 , __snake_case : Tuple=64 , __snake_case : str=2 , __snake_case : Dict=3 , __snake_case : List[Any]="swish" , __snake_case : Optional[int]=3 , __snake_case : Any=32 , __snake_case : Union[str, Any]=0.1 , __snake_case : Optional[int]=0.02 , __snake_case : str=True , __snake_case : Dict=True , __snake_case : Optional[Any]=10 , __snake_case : str=None , __snake_case : Tuple=0.25 , __snake_case : Dict=0.0 , __snake_case : Any=0.0 , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : List[str] = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : List[Any] = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Union[str, Any] = conv_kernel_size
UpperCAmelCase_ : Optional[int] = output_stride
UpperCAmelCase_ : Optional[int] = classifier_dropout_prob
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : Optional[int] = width_multiplier
UpperCAmelCase_ : int = ffn_dropout
UpperCAmelCase_ : Dict = attn_dropout
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def _lowerCamelCase ( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = MobileViTVaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : str = MobileViTVaForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : str , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.num_labels
UpperCAmelCase_ : int = MobileViTVaForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ : Optional[Any] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = False
A_ : List[str] = False
A_ : List[str] = False
A_ : List[Any] = False
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = MobileViTVaModelTester(self )
UpperCAmelCase_ : int = MobileViTVaConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : str = model_class(__snake_case )
UpperCAmelCase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Dict = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(__snake_case : Optional[int] , __snake_case : Any , __snake_case : Union[str, Any] ):
UpperCAmelCase_ : Optional[int] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
UpperCAmelCase_ : str = outputs.hidden_states
UpperCAmelCase_ : Union[str, Any] = 5
self.assertEqual(len(__snake_case ) , __snake_case )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ : List[str] = 2
for i in range(len(__snake_case ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = MobileViTVaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case_ ( ):
UpperCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__snake_case )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : Optional[int] = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**__snake_case )
# verify the logits
UpperCAmelCase_ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __snake_case )
UpperCAmelCase_ : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : Union[str, Any] = model.to(__snake_case )
UpperCAmelCase_ : List[str] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : List[Any] = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**__snake_case )
UpperCAmelCase_ : Dict = outputs.logits
# verify the logits
UpperCAmelCase_ : Optional[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=__snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : Optional[int] = model.to(__snake_case )
UpperCAmelCase_ : str = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : str = model(**__snake_case )
UpperCAmelCase_ : Any = outputs.logits.detach().cpu()
UpperCAmelCase_ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(50, 60)] )
UpperCAmelCase_ : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __snake_case )
UpperCAmelCase_ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
UpperCAmelCase_ : str = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __snake_case )
| 716
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = data
UpperCAmelCase_ : List[Any] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ : Union[str, Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.padding()
UpperCAmelCase_ : str = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ : Any = self.expand_block(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ : Optional[Any] = (b & c) | ((~b) & d)
UpperCAmelCase_ : Optional[Any] = 0X5a_82_79_99
elif 20 <= i < 40:
UpperCAmelCase_ : List[Any] = b ^ c ^ d
UpperCAmelCase_ : str = 0X6e_d9_eb_a1
elif 40 <= i < 60:
UpperCAmelCase_ : str = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ : Optional[int] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
UpperCAmelCase_ : Union[str, Any] = b ^ c ^ d
UpperCAmelCase_ : Dict = 0Xca_62_c1_d6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
UpperCAmelCase_ : Optional[Any] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case_ ( ):
UpperCAmelCase_ : Tuple = B'''Test String'''
assert SHAaHash(__lowercase ).final_hash() == hashlib.shaa(__lowercase ).hexdigest() # noqa: S324
def snake_case_ ( ):
UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
else:
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
print(SHAaHash(__lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 641
| 0
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Any = 'EncodecFeatureExtractor'
A_ : Dict = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Any , __snake_case : Tuple , __snake_case : Any ):
'''simple docstring'''
super().__init__(__snake_case , __snake_case )
UpperCAmelCase_ : Tuple = self.feature_extractor
UpperCAmelCase_ : int = False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Optional[Any]=None , __snake_case : Any=None , __snake_case : Tuple=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self : Optional[Any] , *__snake_case : List[Any] , **__snake_case : str ):
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
UpperCAmelCase_ : int = kwargs.pop('''audio''' , __snake_case )
UpperCAmelCase_ : Dict = kwargs.pop('''sampling_rate''' , __snake_case )
UpperCAmelCase_ : str = kwargs.pop('''text''' , __snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase_ : Tuple = args[0]
UpperCAmelCase_ : List[str] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
UpperCAmelCase_ : Tuple = self.tokenizer(__snake_case , **__snake_case )
if audio is not None:
UpperCAmelCase_ : Optional[int] = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
UpperCAmelCase_ : Optional[int] = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
UpperCAmelCase_ : Dict = audio_inputs['''padding_mask''']
return inputs
def _lowerCamelCase ( self : int , *__snake_case : Tuple , **__snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = kwargs.pop('''audio''' , __snake_case )
UpperCAmelCase_ : Optional[Any] = kwargs.pop('''padding_mask''' , __snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase_ : Optional[int] = args[0]
UpperCAmelCase_ : int = args[1:]
if audio_values is not None:
return self._decode_audio(__snake_case , padding_mask=__snake_case )
else:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : Optional[int] , *__snake_case : str , **__snake_case : str ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : Tuple , __snake_case : int , __snake_case : Optional = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = to_numpy(__snake_case )
UpperCAmelCase_ : Any = audio_values.shape
if padding_mask is None:
return list(__snake_case )
UpperCAmelCase_ : Tuple = to_numpy(__snake_case )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
UpperCAmelCase_ : Dict = seq_len - padding_mask.shape[-1]
UpperCAmelCase_ : Optional[Any] = 1 - self.feature_extractor.padding_value
UpperCAmelCase_ : str = np.pad(__snake_case , ((0, 0), (0, difference)) , '''constant''' , constant_values=__snake_case )
UpperCAmelCase_ : List[Any] = audio_values.tolist()
for i in range(__snake_case ):
UpperCAmelCase_ : Union[str, Any] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
UpperCAmelCase_ : Any = sliced_audio.reshape(__snake_case , -1 )
return audio_values
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
| 641
| 0
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case_ ( __lowercase : Optional[Any] ):
return (data["data"], data["target"])
def snake_case_ ( __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : List[Any] ):
UpperCAmelCase_ : Any = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(__lowercase , __lowercase )
# Predict target for test data
UpperCAmelCase_ : List[Any] = xgb.predict(__lowercase )
UpperCAmelCase_ : List[Any] = predictions.reshape(len(__lowercase ) , 1 )
return predictions
def snake_case_ ( ):
UpperCAmelCase_ : str = fetch_california_housing()
UpperCAmelCase_ : List[str] = data_handling(__lowercase )
UpperCAmelCase_ : Optional[int] = train_test_split(
__lowercase , __lowercase , test_size=0.2_5 , random_state=1 )
UpperCAmelCase_ : Tuple = xgboost(__lowercase , __lowercase , __lowercase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(__lowercase , __lowercase )}''' )
print(F'''Mean Square Error : {mean_squared_error(__lowercase , __lowercase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 718
|
import math
import qiskit
def snake_case_ ( __lowercase = 1 , __lowercase = 1 , __lowercase = 1 ):
if (
isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
UpperCAmelCase_ : Any = qiskit.QuantumRegister(4 , '''qr''' )
UpperCAmelCase_ : List[str] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
UpperCAmelCase_ : Any = [input_a, input_a, carry_in]
UpperCAmelCase_ : Dict = qiskit.QuantumCircuit(__lowercase , __lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __lowercase ) # measure the last two qbits
UpperCAmelCase_ : Optional[int] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 641
| 0
|
def snake_case_ ( __lowercase ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Any = len(__lowercase ) # No of vertices in graph
UpperCAmelCase_ : Optional[int] = [0] * n
UpperCAmelCase_ : List[str] = [False] * n
def dfs(__lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : int = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__lowercase , __lowercase , __lowercase , id_ )
UpperCAmelCase_ : Optional[int] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
UpperCAmelCase_ : str = min(low[at] , low[to] )
UpperCAmelCase_ : list[tuple[int, int]] = []
for i in range(__lowercase ):
if not visited[i]:
dfs(__lowercase , -1 , __lowercase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
| 641
| 0
|
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
if index == number_of_items:
return 0
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[Any] = knapsack(__lowercase , __lowercase , __lowercase , __lowercase , index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_ : Any = values[index] + knapsack(
__lowercase , __lowercase , __lowercase , max_weight - weights[index] , index + 1 )
return max(__lowercase , __lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 641
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = StableDiffusionXLImgaImgPipeline
A_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
A_ : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
A_ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCAmelCase_ : List[str] = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
UpperCAmelCase_ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=32 , )
UpperCAmelCase_ : Any = CLIPTextModel(__snake_case )
UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__snake_case )
UpperCAmelCase_ : Any = CLIPTextModelWithProjection(__snake_case )
UpperCAmelCase_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__snake_case )
UpperCAmelCase_ : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : Any=0 ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
UpperCAmelCase_ : str = image / 2 + 0.5
if str(__snake_case ).startswith('''mps''' ):
UpperCAmelCase_ : Tuple = torch.manual_seed(__snake_case )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = StableDiffusionXLImgaImgPipeline(**__snake_case )
UpperCAmelCase_ : Dict = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : int = self.get_dummy_inputs(__snake_case )
UpperCAmelCase_ : str = sd_pipe(**__snake_case ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : List[str] = StableDiffusionXLImgaImgPipeline(**__snake_case )
UpperCAmelCase_ : List[str] = sd_pipe.to(__snake_case )
UpperCAmelCase_ : List[str] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
# forward without prompt embeds
UpperCAmelCase_ : str = self.get_dummy_inputs(__snake_case )
UpperCAmelCase_ : List[Any] = 3 * ['''this is a negative prompt''']
UpperCAmelCase_ : str = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs['''prompt''']]
UpperCAmelCase_ : int = sd_pipe(**__snake_case )
UpperCAmelCase_ : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs(__snake_case )
UpperCAmelCase_ : str = 3 * ['''this is a negative prompt''']
UpperCAmelCase_ : str = 3 * [inputs.pop('''prompt''' )]
(
UpperCAmelCase_
) : Dict = sd_pipe.encode_prompt(__snake_case , negative_prompt=__snake_case )
UpperCAmelCase_ : Any = sd_pipe(
**__snake_case , prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , pooled_prompt_embeds=__snake_case , negative_pooled_prompt_embeds=__snake_case , )
UpperCAmelCase_ : Tuple = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Any , __snake_case : Tuple , __snake_case : List[str]="cpu" , __snake_case : List[Any]=torch.floataa , __snake_case : Tuple=0 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase_ : Tuple = np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case )
UpperCAmelCase_ : Dict = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Dict = self.get_inputs(__snake_case )
UpperCAmelCase_ : Dict = pipe(**__snake_case ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : int = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 721
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 641
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__UpperCamelCase : Optional[Any] = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
| 641
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Dict = 'rwkv'
A_ : List[str] = {'max_position_embeddings': 'context_length'}
def __init__( self : Union[str, Any] , __snake_case : Dict=50_277 , __snake_case : Any=1_024 , __snake_case : Optional[Any]=4_096 , __snake_case : Tuple=32 , __snake_case : Any=None , __snake_case : str=None , __snake_case : Tuple=1E-5 , __snake_case : Union[str, Any]=0 , __snake_case : List[Any]=0 , __snake_case : Optional[Any]=6 , __snake_case : str=False , __snake_case : int=True , **__snake_case : str , ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Tuple = context_length
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ : List[Any] = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = rescale_every
UpperCAmelCase_ : Tuple = use_cache
UpperCAmelCase_ : Optional[int] = bos_token_id
UpperCAmelCase_ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
| 701
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case_ ( ):
UpperCAmelCase_ : str = HfArgumentParser(__lowercase )
UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=__lowercase )
try:
UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
UpperCAmelCase_ : List[str] = ''' '''.join(str(__lowercase ).split(''' ''' )[:-1] )
UpperCAmelCase_ : Optional[int] = ''''''
UpperCAmelCase_ : Dict = eval(str(__lowercase ).split(''' ''' )[-1] )
UpperCAmelCase_ : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowercase )
if len(__lowercase ) > 0:
UpperCAmelCase_ : Tuple = full_error_msg + begin_error_msg + str(__lowercase )
raise ValueError(__lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 641
| 0
|
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : List[Any] = 2
while i * i <= n:
UpperCAmelCase_ : int = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def snake_case_ ( ):
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = 1
while True:
i += 1
t_num += i
if count_divisors(__lowercase ) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
| 702
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 641
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = 3_8_4
if "tiny" in model_name:
UpperCAmelCase_ : Any = [3, 3, 9, 3]
UpperCAmelCase_ : Union[str, Any] = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
UpperCAmelCase_ : Optional[int] = [3, 3, 2_7, 3]
UpperCAmelCase_ : Dict = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
UpperCAmelCase_ : List[str] = [3, 3, 2_7, 3]
UpperCAmelCase_ : Optional[Any] = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
UpperCAmelCase_ : Any = 5_1_2
if "large" in model_name:
UpperCAmelCase_ : Optional[int] = [3, 3, 2_7, 3]
UpperCAmelCase_ : Dict = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
UpperCAmelCase_ : int = 7_6_8
if "xlarge" in model_name:
UpperCAmelCase_ : Any = [3, 3, 2_7, 3]
UpperCAmelCase_ : int = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
UpperCAmelCase_ : Optional[int] = 1_0_2_4
# set label information
UpperCAmelCase_ : Dict = 1_5_0
UpperCAmelCase_ : Any = '''huggingface/label-files'''
UpperCAmelCase_ : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase_ : Optional[int] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ : Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : int = ConvNextConfig(
depths=__lowercase , hidden_sizes=__lowercase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
UpperCAmelCase_ : List[Any] = UperNetConfig(
backbone_config=__lowercase , auxiliary_in_channels=__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase , )
return config
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = dct.pop(__lowercase )
UpperCAmelCase_ : List[Any] = val
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : str = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
UpperCAmelCase_ : List[str] = model_name_to_url[model_name]
UpperCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase_ : Optional[Any] = get_upernet_config(__lowercase )
UpperCAmelCase_ : List[Any] = UperNetForSemanticSegmentation(__lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase_ : str = state_dict.pop(__lowercase )
if "bn" in key:
UpperCAmelCase_ : Tuple = key.replace('''bn''' , '''batch_norm''' )
UpperCAmelCase_ : Optional[int] = val
# rename keys
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
model.load_state_dict(__lowercase )
# verify on image
UpperCAmelCase_ : Optional[int] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
UpperCAmelCase_ : List[Any] = SegformerImageProcessor()
UpperCAmelCase_ : int = processor(__lowercase , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(__lowercase )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase_ : Optional[Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase_ : Union[str, Any] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase_ : List[Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase_ : Dict = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase_ : Any = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowercase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 703
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
print(F'''Save vocab file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCamelCase : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
| 641
| 0
|
import string
from math import logaa
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : List[Any] = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
UpperCAmelCase_ : Optional[Any] = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCAmelCase_ : str = corpus_without_punctuation.split('''\n''' )
UpperCAmelCase_ : Optional[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__lowercase ))
def snake_case_ ( __lowercase , __lowercase , __lowercase=False ):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def snake_case_ ( __lowercase , __lowercase ):
return round(tf * idf , 3 )
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
'''simple docstring'''
__UpperCamelCase : Optional[Any] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 706
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
| 641
| 0
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
| 707
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
| 641
| 0
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__UpperCamelCase : Tuple = '.'
if __name__ == "__main__":
__UpperCamelCase : List[Any] = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
__UpperCamelCase : List[str] = []
__UpperCamelCase : str = []
with open(doctest_file_path) as fp:
for line in fp:
__UpperCamelCase : Union[str, Any] = line.strip()
__UpperCamelCase : Optional[int] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__UpperCamelCase : List[Any] = '\n'.join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 708
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 641
| 0
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : str , __snake_case : str=2 , __snake_case : Any=3 , __snake_case : Optional[Any]=64 , __snake_case : List[Any]=None ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = np.random.default_rng(__snake_case )
UpperCAmelCase_ : str = length
UpperCAmelCase_ : Union[str, Any] = rng.normal(size=(length,) ).astype(np.floataa )
UpperCAmelCase_ : int = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Tuple ):
'''simple docstring'''
return self.length
def __getitem__( self : List[Any] , __snake_case : List[str] ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class lowerCAmelCase__( torch.nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , __snake_case : List[Any]=0 , __snake_case : Union[str, Any]=0 , __snake_case : str=False ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase_ : Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase_ : List[Any] = True
def _lowerCamelCase ( self : List[Any] , __snake_case : Any=None ):
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCAmelCase_ : Any = False
return x * self.a[0] + self.b[0]
class lowerCAmelCase__( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __snake_case : List[str]=0 , __snake_case : Optional[int]=0 , __snake_case : Union[str, Any]=False ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : Optional[int] = torch.nn.Parameter(torch.tensor(__snake_case ).float() )
UpperCAmelCase_ : Dict = torch.nn.Parameter(torch.tensor(__snake_case ).float() )
UpperCAmelCase_ : List[str] = True
def _lowerCamelCase ( self : Any , __snake_case : str=None ):
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCAmelCase_ : int = False
return x * self.a + self.b
def snake_case_ ( __lowercase , __lowercase = 1_6 ):
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase_ : Dict = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
UpperCAmelCase_ : Optional[Any] = load_dataset('''csv''' , data_files=__lowercase )
UpperCAmelCase_ : Optional[int] = datasets['''train'''].unique('''label''' )
UpperCAmelCase_ : str = {v: i for i, v in enumerate(__lowercase )}
def tokenize_function(__lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Optional[Any] = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase , padding='''max_length''' )
if "label" in examples:
UpperCAmelCase_ : int = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : int = datasets.map(
__lowercase , batched=__lowercase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(__lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : Tuple = DataLoader(tokenized_datasets['''train'''] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=2 )
UpperCAmelCase_ : Dict = DataLoader(tokenized_datasets['''validation'''] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
| 709
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
| 641
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : List[str] = (32, 32)
UpperCAmelCase_ : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__snake_case )
return image
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(__snake_case )
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
def extract(*__snake_case : Any , **__snake_case : List[str] ):
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = torch.ones([0] )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[Any] ):
'''simple docstring'''
self.pixel_values.to(__snake_case )
return self
return Out()
return extract
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.dummy_cond_unet
UpperCAmelCase_ : Tuple = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
UpperCAmelCase_ : int = self.dummy_vae
UpperCAmelCase_ : Dict = self.dummy_text_encoder
UpperCAmelCase_ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : int = StableDiffusionPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ : int = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : int = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ : Optional[int] = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : Any = sd_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : str = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__snake_case , )[0]
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Optional[int] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : int = PNDMScheduler(skip_prk_steps=__snake_case )
UpperCAmelCase_ : List[str] = self.dummy_vae
UpperCAmelCase_ : Dict = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ : Dict = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Dict = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : List[Any] = sd_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Tuple = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__snake_case , )[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : List[Any] = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(pipe.scheduler , __snake_case )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[str] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__snake_case )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__snake_case )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : int = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.dummy_cond_unet
UpperCAmelCase_ : Optional[int] = PNDMScheduler(skip_prk_steps=__snake_case )
UpperCAmelCase_ : Tuple = self.dummy_vae
UpperCAmelCase_ : str = self.dummy_text_encoder
UpperCAmelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
UpperCAmelCase_ : Tuple = unet.half()
UpperCAmelCase_ : Dict = vae.half()
UpperCAmelCase_ : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__snake_case )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : List[Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Any = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
UpperCAmelCase_ : Optional[Any] = 4_003_660_346
UpperCAmelCase_ : Optional[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : List[Any] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
UpperCAmelCase_ : List[str] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__snake_case )
UpperCAmelCase_ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Optional[Any] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
UpperCAmelCase_ : str = 2_734_971_755
UpperCAmelCase_ : Tuple = 7
UpperCAmelCase_ : Any = torch.manual_seed(__snake_case )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
UpperCAmelCase_ : Optional[int] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : int = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
UpperCAmelCase_ : int = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Union[str, Any] = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
UpperCAmelCase_ : Optional[Any] = 1_044_355_234
UpperCAmelCase_ : int = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
UpperCAmelCase_ : Dict = torch.manual_seed(__snake_case )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 710
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
| 0
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase_ : Union[str, Any] = model_name.find('''patch''' )
UpperCAmelCase_ : Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
UpperCAmelCase_ : Optional[Any] = XCLIPVisionConfig(patch_size=__lowercase , num_frames=__lowercase )
if "large" in model_name:
UpperCAmelCase_ : Tuple = 7_6_8
UpperCAmelCase_ : Union[str, Any] = 3_0_7_2
UpperCAmelCase_ : Optional[int] = 1_2
UpperCAmelCase_ : Dict = 1_0_2_4
UpperCAmelCase_ : List[str] = 4_0_9_6
UpperCAmelCase_ : int = 1_6
UpperCAmelCase_ : int = 2_4
UpperCAmelCase_ : Tuple = 7_6_8
UpperCAmelCase_ : str = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Tuple = 3_3_6
UpperCAmelCase_ : List[str] = XCLIPConfig.from_text_vision_configs(__lowercase , __lowercase )
if "large" in model_name:
UpperCAmelCase_ : Dict = 7_6_8
return config
def snake_case_ ( __lowercase ):
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase_ : List[Any] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
UpperCAmelCase_ : Union[str, Any] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
UpperCAmelCase_ : Dict = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
UpperCAmelCase_ : Tuple = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase_ : Optional[int] = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
UpperCAmelCase_ : Tuple = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase_ : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
UpperCAmelCase_ : Optional[Any] = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
UpperCAmelCase_ : Union[str, Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
UpperCAmelCase_ : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
UpperCAmelCase_ : List[Any] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
UpperCAmelCase_ : str = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
UpperCAmelCase_ : List[Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
UpperCAmelCase_ : List[str] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase_ : Dict = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
UpperCAmelCase_ : Tuple = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase_ : int = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
UpperCAmelCase_ : List[Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
UpperCAmelCase_ : int = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def snake_case_ ( __lowercase , __lowercase ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Any = orig_state_dict.pop(__lowercase )
if "attn.in_proj" in key:
UpperCAmelCase_ : List[Any] = key.split('''.''' )
if key.startswith('''visual''' ):
UpperCAmelCase_ : Any = key_split[3]
UpperCAmelCase_ : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ : List[Any] = val[
:dim, :
]
UpperCAmelCase_ : Any = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Tuple = val[
-dim:, :
]
else:
UpperCAmelCase_ : Dict = val[
:dim
]
UpperCAmelCase_ : Union[str, Any] = val[
dim : dim * 2
]
UpperCAmelCase_ : List[Any] = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ : Union[str, Any] = val[
:dim, :
]
UpperCAmelCase_ : Tuple = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Tuple = val[
-dim:, :
]
else:
UpperCAmelCase_ : int = val[:dim]
UpperCAmelCase_ : int = val[
dim : dim * 2
]
UpperCAmelCase_ : List[str] = val[-dim:]
elif key.startswith('''mit''' ):
UpperCAmelCase_ : List[str] = key_split[2]
UpperCAmelCase_ : int = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ : Optional[int] = val[:dim, :]
UpperCAmelCase_ : Any = val[dim : dim * 2, :]
UpperCAmelCase_ : Any = val[-dim:, :]
else:
UpperCAmelCase_ : Any = val[:dim]
UpperCAmelCase_ : str = val[dim : dim * 2]
UpperCAmelCase_ : Any = val[-dim:]
else:
UpperCAmelCase_ : str = key_split[2]
UpperCAmelCase_ : str = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ : Dict = val[:dim, :]
UpperCAmelCase_ : Optional[Any] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[str] = val[-dim:, :]
else:
UpperCAmelCase_ : Any = val[:dim]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2
]
UpperCAmelCase_ : int = val[-dim:]
else:
UpperCAmelCase_ : Optional[Any] = rename_key(__lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase_ : List[str] = val.T
UpperCAmelCase_ : int = val
return orig_state_dict
def snake_case_ ( __lowercase ):
if num_frames == 8:
UpperCAmelCase_ : Optional[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
UpperCAmelCase_ : int = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
UpperCAmelCase_ : Union[str, Any] = '''eating_spaghetti_32_frames.npy'''
UpperCAmelCase_ : str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=__lowercase , repo_type='''dataset''' , )
UpperCAmelCase_ : Optional[Any] = np.load(__lowercase )
return list(__lowercase )
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : Tuple = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
UpperCAmelCase_ : Dict = model_to_url[model_name]
UpperCAmelCase_ : Union[str, Any] = 8
if "16-frames" in model_name:
UpperCAmelCase_ : str = 1_6
elif "shot" in model_name:
UpperCAmelCase_ : Tuple = 3_2
UpperCAmelCase_ : List[str] = get_xclip_config(__lowercase , __lowercase )
UpperCAmelCase_ : Any = XCLIPModel(__lowercase )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase_ : Dict = '''pytorch_model.bin'''
gdown.cached_download(__lowercase , __lowercase , quiet=__lowercase )
UpperCAmelCase_ : int = torch.load(__lowercase , map_location='''cpu''' )['''model''']
else:
UpperCAmelCase_ : Tuple = torch.hub.load_state_dict_from_url(__lowercase )['''model''']
UpperCAmelCase_ : List[Any] = convert_state_dict(__lowercase , __lowercase )
UpperCAmelCase_ : Union[str, Any] = XCLIPModel(__lowercase )
UpperCAmelCase_ : Tuple = model.load_state_dict(__lowercase , strict=__lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase_ : Tuple = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
UpperCAmelCase_ : Tuple = VideoMAEImageProcessor(size=__lowercase )
UpperCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : int = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : Union[str, Any] = XCLIPProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : List[Any] = prepare_video(__lowercase )
UpperCAmelCase_ : str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=__lowercase , return_tensors='''pt''' , padding=__lowercase )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
# Verify outputs
UpperCAmelCase_ : Dict = outputs.logits_per_video
UpperCAmelCase_ : Any = logits_per_video.softmax(dim=1 )
print('''Probs:''' , __lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ : Optional[int] = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ : int = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ : List[Any] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ : str = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ : str = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : List[str] = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ : Tuple = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ : List[str] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ : Any = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ : Any = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ : int = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ : List[Any] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ : int = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ : int = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(__lowercase , __lowercase , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(__lowercase , organization='''nielsr''' )
processor.push_to_hub(__lowercase , organization='''nielsr''' )
slow_tokenizer.push_to_hub(__lowercase , organization='''nielsr''' )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 711
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
| 641
| 0
|
import math
from collections.abc import Iterator
from itertools import takewhile
def snake_case_ ( __lowercase ) -> str:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( ) -> Tuple:
UpperCAmelCase_ : str = 2
while True:
if is_prime(__lowercase ):
yield num
num += 1
def snake_case_ ( __lowercase = 2_0_0_0_0_0_0 ) -> List[str]:
return sum(takewhile(lambda __lowercase : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
| 641
| 0
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , snake_case__ , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = RobertaConfig
A_ : List[Any] = 'roberta'
def __init__( self : str , __snake_case : Tuple ):
'''simple docstring'''
super().__init__(__snake_case )
UpperCAmelCase_ : Tuple = RobertaEmbeddings(__snake_case )
self.init_weights()
@add_start_docstrings(
'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , snake_case__ , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = RobertaConfig
A_ : int = 'roberta'
def __init__( self : str , __snake_case : Optional[int] ):
'''simple docstring'''
super().__init__(__snake_case )
UpperCAmelCase_ : int = config.num_labels
UpperCAmelCase_ : Dict = config.num_hidden_layers
UpperCAmelCase_ : Dict = DeeRobertaModel(__snake_case )
UpperCAmelCase_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ : str = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : int=None , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Union[str, Any]=None , __snake_case : List[str]=None , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=-1 , __snake_case : List[str]=False , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.num_layers
try:
UpperCAmelCase_ : List[Any] = self.roberta(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , position_ids=__snake_case , head_mask=__snake_case , inputs_embeds=__snake_case , )
UpperCAmelCase_ : Optional[int] = outputs[1]
UpperCAmelCase_ : List[Any] = self.dropout(__snake_case )
UpperCAmelCase_ : str = self.classifier(__snake_case )
UpperCAmelCase_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCAmelCase_ : Optional[Any] = e.message
UpperCAmelCase_ : Tuple = e.exit_layer
UpperCAmelCase_ : Any = outputs[0]
if not self.training:
UpperCAmelCase_ : Tuple = entropy(__snake_case )
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : Optional[int] = MSELoss()
UpperCAmelCase_ : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ : Any = CrossEntropyLoss()
UpperCAmelCase_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCAmelCase_ : int = []
for highway_exit in outputs[-1]:
UpperCAmelCase_ : Optional[int] = highway_exit[0]
if not self.training:
highway_logits_all.append(__snake_case )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : Union[str, Any] = MSELoss()
UpperCAmelCase_ : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ : Tuple = CrossEntropyLoss()
UpperCAmelCase_ : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__snake_case )
if train_highway:
UpperCAmelCase_ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCAmelCase_ : Optional[Any] = (loss,) + outputs
if not self.training:
UpperCAmelCase_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCAmelCase_ : List[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 713
|
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 641
| 0
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( __lowercase , __lowercase ):
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(__lowercase ):
print(F'''{i}\t\t{d}''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
for j in range(__lowercase ):
UpperCAmelCase_ : str = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = [float('''inf''' )] * vertex_count
UpperCAmelCase_ : Optional[Any] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(__lowercase ):
UpperCAmelCase_ : Optional[int] = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
UpperCAmelCase_ : Tuple = distance[u] + w
UpperCAmelCase_ : Dict = check_negative_cycle(__lowercase , __lowercase , __lowercase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Optional[int] = int(input('Enter number of vertices: ').strip())
__UpperCamelCase : Optional[int] = int(input('Enter number of edges: ').strip())
__UpperCamelCase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
__UpperCamelCase : Optional[int] = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
__UpperCamelCase : Optional[Any] = {'src': src, 'dst': dest, 'weight': weight}
__UpperCamelCase : Union[str, Any] = int(input('\nEnter shortest path source:').strip())
__UpperCamelCase : Union[str, Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 714
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 641
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Any = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[Any] = int(__lowercase )
UpperCAmelCase_ : Dict = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=3_0_0 ):
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
UpperCAmelCase_ : Tuple = F'''{elt:.6f}''' if isinstance(__lowercase , __lowercase ) else str(__lowercase )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase__:
'''simple docstring'''
A_ : int = 5
A_ : List[Any] = 0.2
def __init__( self : List[str] , __snake_case : int , __snake_case : Optional[str] = None , __snake_case : bool = True , __snake_case : Optional["NotebookTrainingTracker"] = None , __snake_case : int = 300 , ):
'''simple docstring'''
UpperCAmelCase_ : Any = total
UpperCAmelCase_ : Union[str, Any] = '''''' if prefix is None else prefix
UpperCAmelCase_ : str = leave
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : List[Any] = width
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Any = None
def _lowerCamelCase ( self : Optional[int] , __snake_case : int , __snake_case : bool = False , __snake_case : str = None ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = value
if comment is not None:
UpperCAmelCase_ : int = comment
if self.last_value is None:
UpperCAmelCase_ : int = time.time()
UpperCAmelCase_ : Optional[int] = value
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = self.warmup
UpperCAmelCase_ : List[Any] = 1
self.update_bar(__snake_case )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
UpperCAmelCase_ : Union[str, Any] = time.time()
UpperCAmelCase_ : List[str] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
UpperCAmelCase_ : int = self.elapsed_time / (value - self.start_value)
else:
UpperCAmelCase_ : Optional[int] = None
if value >= self.total:
UpperCAmelCase_ : Optional[int] = self.total
UpperCAmelCase_ : str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
UpperCAmelCase_ : Any = self.average_time_per_item * (self.total - value)
self.update_bar(__snake_case )
UpperCAmelCase_ : List[str] = value
UpperCAmelCase_ : Tuple = current_time
if self.average_time_per_item is None:
UpperCAmelCase_ : Dict = 1
else:
UpperCAmelCase_ : int = max(int(self.update_every / self.average_time_per_item ) , 1 )
def _lowerCamelCase ( self : Tuple , __snake_case : List[Any] , __snake_case : List[str]=None ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case )
if self.elapsed_time is None:
UpperCAmelCase_ : int = f'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
UpperCAmelCase_ : Optional[Any] = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
UpperCAmelCase_ : Tuple = (
f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
f''' {format_time(self.predicted_remaining )}'''
)
self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
self.display()
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
UpperCAmelCase_ : Optional[int] = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : Dict , __snake_case : Optional[int] , __snake_case : Optional[Any]=None ):
'''simple docstring'''
super().__init__(__snake_case )
UpperCAmelCase_ : Tuple = None if column_names is None else [column_names]
UpperCAmelCase_ : Dict = None
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
UpperCAmelCase_ : Dict = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Optional[int] ):
'''simple docstring'''
if self.inner_table is None:
UpperCAmelCase_ : List[Any] = [list(values.keys() ), list(values.values() )]
else:
UpperCAmelCase_ : Union[str, Any] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__snake_case )
UpperCAmelCase_ : Union[str, Any] = columns
self.inner_table.append([values[c] for c in columns] )
def _lowerCamelCase ( self : Dict , __snake_case : Dict , __snake_case : Any=None , __snake_case : List[str]=300 ):
'''simple docstring'''
UpperCAmelCase_ : str = NotebookProgressBar(__snake_case , prefix=__snake_case , parent=self , width=__snake_case )
return self.child_bar
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Dict = None
self.display()
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = False
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str , __snake_case : Any , __snake_case : str , **__snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Optional[int] = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
UpperCAmelCase_ : Any = NotebookTrainingTracker(state.max_steps , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , **__snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
UpperCAmelCase_ : str = False
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : int , __snake_case : int , __snake_case : Dict=None , **__snake_case : int ):
'''simple docstring'''
if not has_length(__snake_case ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
UpperCAmelCase_ : List[Any] = self.training_tracker.add_child(len(__snake_case ) )
else:
UpperCAmelCase_ : List[str] = NotebookProgressBar(len(__snake_case ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _lowerCamelCase ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
UpperCAmelCase_ : List[str] = None
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Union[str, Any]=None , **__snake_case : Dict ):
'''simple docstring'''
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
UpperCAmelCase_ : Union[str, Any] = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step sine we're not in epoch eval strategy
UpperCAmelCase_ : str = state.global_step
self.training_tracker.write_line(__snake_case )
def _lowerCamelCase ( self : Any , __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int=None , **__snake_case : int ):
'''simple docstring'''
if self.training_tracker is not None:
UpperCAmelCase_ : List[Any] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
UpperCAmelCase_ : Dict = log['''loss''']
break
if self.first_column == "Epoch":
UpperCAmelCase_ : Dict = int(state.epoch )
else:
UpperCAmelCase_ : Dict = state.global_step
UpperCAmelCase_ : Dict = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
UpperCAmelCase_ : Optional[Any] = re.sub(R'''\_loss$''' , '''''' , __snake_case )
UpperCAmelCase_ : Union[str, Any] = metrics.pop('''total_flos''' , __snake_case )
UpperCAmelCase_ : str = metrics.pop('''epoch''' , __snake_case )
UpperCAmelCase_ : Optional[Any] = metrics.pop(f'''{metric_key_prefix}_runtime''' , __snake_case )
UpperCAmelCase_ : int = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , __snake_case )
UpperCAmelCase_ : int = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , __snake_case )
UpperCAmelCase_ : Optional[int] = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , __snake_case )
for k, v in metrics.items():
if k == f'''{metric_key_prefix}_loss''':
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Optional[Any] = k.split('''_''' )
UpperCAmelCase_ : Optional[Any] = ''' '''.join([part.capitalize() for part in splits[1:]] )
UpperCAmelCase_ : Optional[Any] = v
self.training_tracker.write_line(__snake_case )
self.training_tracker.remove_child()
UpperCAmelCase_ : Tuple = None
# Evaluation takes a long time so we should force the next update.
UpperCAmelCase_ : Any = True
def _lowerCamelCase ( self : str , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Tuple , **__snake_case : str ):
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__snake_case )
UpperCAmelCase_ : List[str] = None
| 716
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = data
UpperCAmelCase_ : List[Any] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ : Union[str, Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.padding()
UpperCAmelCase_ : str = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ : Any = self.expand_block(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ : Optional[Any] = (b & c) | ((~b) & d)
UpperCAmelCase_ : Optional[Any] = 0X5a_82_79_99
elif 20 <= i < 40:
UpperCAmelCase_ : List[Any] = b ^ c ^ d
UpperCAmelCase_ : str = 0X6e_d9_eb_a1
elif 40 <= i < 60:
UpperCAmelCase_ : str = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ : Optional[int] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
UpperCAmelCase_ : Union[str, Any] = b ^ c ^ d
UpperCAmelCase_ : Dict = 0Xca_62_c1_d6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
UpperCAmelCase_ : Optional[Any] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case_ ( ):
UpperCAmelCase_ : Tuple = B'''Test String'''
assert SHAaHash(__lowercase ).final_hash() == hashlib.shaa(__lowercase ).hexdigest() # noqa: S324
def snake_case_ ( ):
UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
else:
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
print(SHAaHash(__lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 641
| 0
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = VQModel
A_ : Optional[int] = 'sample'
@property
def _lowerCamelCase ( self : List[Any] , __snake_case : Union[str, Any]=(32, 32) ):
'''simple docstring'''
UpperCAmelCase_ : int = 4
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
return {"sample": image}
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return (3, 32, 32)
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return (3, 32, 32)
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
UpperCAmelCase_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self : int ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__snake_case )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(__snake_case ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
UpperCAmelCase_ : Union[str, Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
UpperCAmelCase_ : List[Any] = image.to(__snake_case )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(__snake_case ).sample
UpperCAmelCase_ : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase_ : Dict = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) )
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
| 641
| 0
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def snake_case_ ( __lowercase : Optional[Any] ):
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : Tuple = tokenizer(example['''content'''] , truncation=__lowercase )['''input_ids''']
UpperCAmelCase_ : Dict = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
__UpperCamelCase : str = HfArgumentParser(PretokenizationArguments)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
if args.num_workers is None:
__UpperCamelCase : Any = multiprocessing.cpu_count()
__UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__UpperCamelCase : int = time.time()
__UpperCamelCase : Dict = load_dataset(args.dataset_name, split='train')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
__UpperCamelCase : Optional[int] = time.time()
__UpperCamelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
__UpperCamelCase : Any = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 718
|
import math
import qiskit
def snake_case_ ( __lowercase = 1 , __lowercase = 1 , __lowercase = 1 ):
if (
isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
UpperCAmelCase_ : Any = qiskit.QuantumRegister(4 , '''qr''' )
UpperCAmelCase_ : List[str] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
UpperCAmelCase_ : Any = [input_a, input_a, carry_in]
UpperCAmelCase_ : Dict = qiskit.QuantumCircuit(__lowercase , __lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __lowercase ) # measure the last two qbits
UpperCAmelCase_ : Optional[int] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 641
| 0
|
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Tuple = len(__lowercase )
while cur > 1:
# Find the maximum number in arr
UpperCAmelCase_ : Union[str, Any] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
UpperCAmelCase_ : Optional[int] = arr[mi::-1] + arr[mi + 1 : len(__lowercase )]
# Reverse whole list
UpperCAmelCase_ : int = arr[cur - 1 :: -1] + arr[cur : len(__lowercase )]
cur -= 1
return arr
if __name__ == "__main__":
__UpperCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
__UpperCamelCase : Tuple = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
| 641
| 0
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple=0 ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__snake_case ) )
UpperCAmelCase_ : Dict = np.random.RandomState(__snake_case )
UpperCAmelCase_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs()
UpperCAmelCase_ : List[Any] = pipe(**__snake_case ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : List[str] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs()
UpperCAmelCase_ : List[Any] = pipe(**__snake_case ).images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
# warmup pass to apply optimizations
UpperCAmelCase_ : Dict = pipe(**self.get_dummy_inputs() )
UpperCAmelCase_ : Any = self.get_dummy_inputs()
UpperCAmelCase_ : int = pipe(**__snake_case ).images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : List[Any] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs()
UpperCAmelCase_ : str = pipe(**__snake_case ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Tuple = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs()
UpperCAmelCase_ : List[str] = pipe(**__snake_case ).images
UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Any = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs()
UpperCAmelCase_ : List[str] = pipe(**__snake_case ).images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ort.SessionOptions()
UpperCAmelCase_ : Tuple = False
return options
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase_ : str = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCAmelCase_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : List[Any] = '''A fantasy landscape, trending on artstation'''
UpperCAmelCase_ : str = np.random.RandomState(0 )
UpperCAmelCase_ : int = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type='''np''' , )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_ : Any = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase_ : Dict = init_image.resize((768, 512) )
UpperCAmelCase_ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Optional[Any] = '''A fantasy landscape, trending on artstation'''
UpperCAmelCase_ : Dict = np.random.RandomState(0 )
UpperCAmelCase_ : Any = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type='''np''' , )
UpperCAmelCase_ : Optional[Any] = output.images
UpperCAmelCase_ : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 720
|
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 641
| 0
|
def snake_case_ ( __lowercase = 5_0 ):
UpperCAmelCase_ : Optional[int] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }')
| 721
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 641
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Union[str, Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
| 641
| 0
|
def snake_case_ ( __lowercase ):
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
UpperCAmelCase_ : Any = [True] * (num + 1)
UpperCAmelCase_ : Any = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowercase ):
UpperCAmelCase_ : Optional[Any] = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Optional[Any] = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 701
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case_ ( ):
UpperCAmelCase_ : str = HfArgumentParser(__lowercase )
UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=__lowercase )
try:
UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
UpperCAmelCase_ : List[str] = ''' '''.join(str(__lowercase ).split(''' ''' )[:-1] )
UpperCAmelCase_ : Optional[int] = ''''''
UpperCAmelCase_ : Dict = eval(str(__lowercase ).split(''' ''' )[-1] )
UpperCAmelCase_ : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowercase )
if len(__lowercase ) > 0:
UpperCAmelCase_ : Tuple = full_error_msg + begin_error_msg + str(__lowercase )
raise ValueError(__lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 641
| 0
|
def snake_case_ ( __lowercase ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 641
| 0
|
import operator
def snake_case_ ( __lowercase , __lowercase = False , __lowercase = None ):
UpperCAmelCase_ : Any = operator.lt if reverse else operator.gt
UpperCAmelCase_ : Optional[int] = solution or []
if not arr:
return solution
UpperCAmelCase_ : Union[str, Any] = [arr.pop(0 )]
for i, item in enumerate(__lowercase ):
if _operator(__lowercase , sublist[-1] ):
sublist.append(__lowercase )
arr.pop(__lowercase )
# merging sublist into solution list
if not solution:
solution.extend(__lowercase )
else:
while sublist:
UpperCAmelCase_ : Any = sublist.pop(0 )
for i, xx in enumerate(__lowercase ):
if not _operator(__lowercase , __lowercase ):
solution.insert(__lowercase , __lowercase )
break
else:
solution.append(__lowercase )
strand_sort(__lowercase , __lowercase , __lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 703
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
print(F'''Save vocab file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641
| 0
|
import qiskit
def snake_case_ ( __lowercase = 2 ):
UpperCAmelCase_ : Union[str, Any] = qubits
# Using Aer's simulator
UpperCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
UpperCAmelCase_ : Any = qiskit.QuantumCircuit(__lowercase , __lowercase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __lowercase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __lowercase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__lowercase ) ) , list(range(__lowercase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
| 704
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
| 641
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
A_ : str = BlenderbotConfig
A_ : Tuple = {}
A_ : str = 'gelu'
def __init__( self : Dict , __snake_case : str , __snake_case : Optional[Any]=13 , __snake_case : Any=7 , __snake_case : Optional[int]=True , __snake_case : Optional[Any]=False , __snake_case : str=99 , __snake_case : Optional[Any]=32 , __snake_case : List[Any]=2 , __snake_case : Dict=4 , __snake_case : Union[str, Any]=37 , __snake_case : List[str]=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=20 , __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=1 , __snake_case : List[Any]=0 , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : Any = is_training
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = pad_token_id
UpperCAmelCase_ : int = bos_token_id
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ : int = prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case )
return config, inputs_dict
def _lowerCamelCase ( self : Tuple , __snake_case : List[str] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=__snake_case ).get_decoder()
UpperCAmelCase_ : Optional[int] = inputs_dict['''input_ids''']
UpperCAmelCase_ : List[Any] = input_ids[:1, :]
UpperCAmelCase_ : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase_ : str = inputs_dict['''head_mask''']
UpperCAmelCase_ : Dict = 1
# first forward pass
UpperCAmelCase_ : Optional[int] = model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case )
UpperCAmelCase_ : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_ : Any = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ : Optional[int] = model(__snake_case , attention_mask=__snake_case )[0]
UpperCAmelCase_ : Union[str, Any] = model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__snake_case , __snake_case , rtol=1E-3 )
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ):
if attention_mask is None:
UpperCAmelCase_ : Any = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Any = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
A_ : Optional[Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
A_ : Dict = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ : Optional[Any] = True
A_ : Dict = False
A_ : List[str] = False
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = TFBlenderbotModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self , config_class=__snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__snake_case )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A_ : Dict = ['My friends are cool but they eat too many carbs.']
A_ : Optional[Any] = 'facebook/blenderbot-400M-distill'
@cached_property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase_ : str = self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : List[Any] = 13
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : Optional[int] = 30
UpperCAmelCase_ : str = self.seq_length + self.mem_len
UpperCAmelCase_ : Optional[Any] = 15
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : str = 99
UpperCAmelCase_ : List[Any] = [10, 50, 80]
UpperCAmelCase_ : Dict = 32
UpperCAmelCase_ : List[Any] = 32
UpperCAmelCase_ : Any = 4
UpperCAmelCase_ : Tuple = 8
UpperCAmelCase_ : Union[str, Any] = 128
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : List[str] = self.vocab_size - 1
UpperCAmelCase_ : Optional[int] = 0.01
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Optional[Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : int , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFTransfoXLModel(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case ).to_tuple()
UpperCAmelCase_ : Dict = {'''input_ids''': input_ids_a, '''mems''': mems_a}
UpperCAmelCase_ : Tuple = model(__snake_case ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCamelCase ( self : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Any = TFTransfoXLLMHeadModel(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case ).to_tuple()
UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
UpperCAmelCase_ : List[str] = model(__snake_case ).to_tuple()
UpperCAmelCase_ : Dict = model([input_ids_a, mems_a] ).to_tuple()
UpperCAmelCase_ : List[Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
UpperCAmelCase_ : Union[str, Any] = model(__snake_case ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = TFTransfoXLForSequenceClassification(__snake_case )
UpperCAmelCase_ : List[Any] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
(UpperCAmelCase_) : str = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
A_ : Union[str, Any] = () if is_tf_available() else ()
A_ : Optional[int] = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
A_ : Dict = False
A_ : int = False
A_ : Any = False
A_ : str = False
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Dict , __snake_case : int , __snake_case : str , __snake_case : int ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = TFTransfoXLModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=__snake_case , d_embed=37 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
self.model_tester.set_seed()
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
self.model_tester.set_seed()
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(__snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
UpperCAmelCase_ : int = model.get_output_embeddings()
assert isinstance(__snake_case , tf.keras.layers.Layer )
UpperCAmelCase_ : int = model.get_bias()
assert name is None
else:
UpperCAmelCase_ : int = model.get_output_embeddings()
assert x is None
UpperCAmelCase_ : Union[str, Any] = model.get_bias()
assert name is None
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@slow
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = TFTransfoXLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase_ : Union[str, Any] = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase_ : Union[str, Any] = model.generate(__snake_case , max_length=200 , do_sample=__snake_case )
self.assertListEqual(output_ids[0].numpy().tolist() , __snake_case )
| 706
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
| 641
| 0
|
import os
import sys
__UpperCamelCase : List[Any] = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
__UpperCamelCase : Dict = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def snake_case_ ( *__lowercase , **__lowercase ):
return AutoConfig.from_pretrained(*__lowercase , **__lowercase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def snake_case_ ( *__lowercase , **__lowercase ):
return AutoTokenizer.from_pretrained(*__lowercase , **__lowercase )
@add_start_docstrings(AutoModel.__doc__ )
def snake_case_ ( *__lowercase , **__lowercase ):
return AutoModel.from_pretrained(*__lowercase , **__lowercase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def snake_case_ ( *__lowercase , **__lowercase ):
return AutoModelForCausalLM.from_pretrained(*__lowercase , **__lowercase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def snake_case_ ( *__lowercase , **__lowercase ):
return AutoModelForMaskedLM.from_pretrained(*__lowercase , **__lowercase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def snake_case_ ( *__lowercase , **__lowercase ):
return AutoModelForSequenceClassification.from_pretrained(*__lowercase , **__lowercase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def snake_case_ ( *__lowercase , **__lowercase ):
return AutoModelForQuestionAnswering.from_pretrained(*__lowercase , **__lowercase )
| 707
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
| 641
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
| 708
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 641
| 0
|
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
UpperCAmelCase_ : Union[str, Any] = s_dict.pop(__lowercase )
elif "subsample" in key:
UpperCAmelCase_ : List[Any] = s_dict.pop(__lowercase )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Dict = emb.weight.shape
UpperCAmelCase_ : Union[str, Any] = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
UpperCAmelCase_ : List[Any] = emb.weight.data
return lin_layer
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : str = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : int = mam_aaa['''args''']
UpperCAmelCase_ : Dict = mam_aaa['''model''']
UpperCAmelCase_ : Optional[Any] = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(__lowercase )
rename_keys(__lowercase )
UpperCAmelCase_ : int = state_dict['''decoder.embed_tokens.weight'''].shape[0]
UpperCAmelCase_ : Optional[Any] = args.share_decoder_input_output_embed
UpperCAmelCase_ : str = [int(__lowercase ) for i in args.conv_kernel_sizes.split(''',''' )]
UpperCAmelCase_ : List[Any] = SpeechaTextConfig(
vocab_size=__lowercase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(__lowercase ) , conv_channels=args.conv_channels , conv_kernel_sizes=__lowercase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__lowercase , num_beams=5 , max_length=2_0_0 , use_cache=__lowercase , decoder_start_token_id=2 , early_stopping=__lowercase , )
UpperCAmelCase_ : Tuple = SpeechaTextForConditionalGeneration(__lowercase )
UpperCAmelCase_ : List[Any] = model.model.load_state_dict(__lowercase , strict=__lowercase )
if len(__lowercase ) > 0 and not set(__lowercase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
UpperCAmelCase_ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCAmelCase_ : List[Any] = lm_head_weights
model.save_pretrained(__lowercase )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__UpperCamelCase : Tuple = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 709
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
| 641
| 0
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def snake_case_ ( __lowercase , __lowercase , __lowercase = 1_0**-1_0 ):
UpperCAmelCase_ : Optional[Any] = a
while True:
UpperCAmelCase_ : Any = Decimal(__lowercase ) - (
Decimal(eval(__lowercase ) ) / Decimal(eval(str(diff(__lowercase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__lowercase ) ) < precision: # noqa: S307
return float(__lowercase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(F'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(F'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(F'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 710
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
| 0
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : int = ProphetNetTokenizer
A_ : Union[str, Any] = False
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
super().setUp()
UpperCAmelCase_ : Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowerCamelCase ( self : List[str] , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase_ : List[str] = '''unwanted, running'''
return input_text, output_text
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__snake_case , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [9, 6, 7, 12, 10, 11] )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=__snake_case , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCAmelCase_ : Any = {}
for i, token in enumerate(__snake_case ):
UpperCAmelCase_ : List[str] = i
UpperCAmelCase_ : List[Any] = WordpieceTokenizer(vocab=__snake_case , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
UpperCAmelCase_ : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ : Optional[int] = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
UpperCAmelCase_ : str = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase_ : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
UpperCAmelCase_ : int = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
UpperCAmelCase_ : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(__snake_case )
UpperCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 711
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
| 641
| 0
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.dummy_uncond_unet
UpperCAmelCase_ : Optional[int] = ScoreSdeVeScheduler()
UpperCAmelCase_ : str = ScoreSdeVePipeline(unet=__snake_case , scheduler=__snake_case )
sde_ve.to(__snake_case )
sde_ve.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : str = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__snake_case ).images
UpperCAmelCase_ : str = torch.manual_seed(0 )
UpperCAmelCase_ : Dict = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__snake_case , return_dict=__snake_case )[
0
]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = '''google/ncsnpp-church-256'''
UpperCAmelCase_ : Dict = UNetaDModel.from_pretrained(__snake_case )
UpperCAmelCase_ : List[Any] = ScoreSdeVeScheduler.from_pretrained(__snake_case )
UpperCAmelCase_ : Optional[int] = ScoreSdeVePipeline(unet=__snake_case , scheduler=__snake_case )
sde_ve.to(__snake_case )
sde_ve.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Any = torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=__snake_case ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
| 641
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Union[str, Any] = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713
|
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 641
| 0
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
def snake_case_ ( __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = False , ):
UpperCAmelCase_ : Dict = bnb_quantization_config.load_in_abit
UpperCAmelCase_ : Optional[int] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
UpperCAmelCase_ : Optional[Any] = []
# custom device map
if isinstance(__lowercase , __lowercase ) and len(device_map.keys() ) > 1:
UpperCAmelCase_ : Any = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCAmelCase_ : List[str] = get_keys_to_not_convert(__lowercase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowercase )
UpperCAmelCase_ : Optional[int] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowercase )
# compatibility with peft
UpperCAmelCase_ : List[Any] = load_in_abit
UpperCAmelCase_ : Optional[int] = load_in_abit
UpperCAmelCase_ : Optional[Any] = get_parameter_device(__lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
UpperCAmelCase_ : str = replace_with_bnb_layers(__lowercase , __lowercase , modules_to_not_convert=__lowercase )
# convert param to the right dtype
UpperCAmelCase_ : int = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCAmelCase_ : str = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
UpperCAmelCase_ : Dict = getattr(__lowercase , __lowercase , __lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowercase ):
param.to(__lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
UpperCAmelCase_ : int = replace_with_bnb_layers(
__lowercase , __lowercase , modules_to_not_convert=__lowercase )
UpperCAmelCase_ : Optional[int] = get_quantized_model_device_map(
__lowercase , __lowercase , __lowercase , max_memory=__lowercase , no_split_module_classes=__lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Optional[Any] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
__lowercase , __lowercase , __lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowercase , offload_state_dict=__lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowercase , device_map=__lowercase , offload_dir=__lowercase )
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None ):
if device_map is None:
if torch.cuda.is_available():
UpperCAmelCase_ : List[Any] = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(__lowercase , __lowercase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
UpperCAmelCase_ : List[str] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : Tuple = special_dtypes
UpperCAmelCase_ : List[str] = no_split_module_classes
UpperCAmelCase_ : str = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCAmelCase_ : Union[str, Any] = get_balanced_memory(
__lowercase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowercase , **__lowercase , )
UpperCAmelCase_ : List[Any] = max_memory
UpperCAmelCase_ : List[Any] = infer_auto_device_map(__lowercase , **__lowercase )
if isinstance(__lowercase , __lowercase ):
# check if don't have any quantized module on the cpu
UpperCAmelCase_ : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCAmelCase_ : int = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , __lowercase=None ):
if modules_to_not_convert is None:
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Union[str, Any] = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , ):
UpperCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase_ : Union[str, Any] = []
current_key_name.append(__lowercase )
if isinstance(__lowercase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCAmelCase_ : Dict = '''.'''.join(__lowercase )
UpperCAmelCase_ : Tuple = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCAmelCase_ : Tuple = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCAmelCase_ : List[str] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowercase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCAmelCase_ : Tuple = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
UpperCAmelCase_ : Optional[Any] = module.weight.data
if module.bias is not None:
UpperCAmelCase_ : List[str] = module.bias.data
bnb_module.requires_grad_(__lowercase )
setattr(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : Optional[Any] = True
if len(list(module.children() ) ) > 0:
UpperCAmelCase_ : Tuple = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : Tuple = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def snake_case_ ( __lowercase ):
# Create a copy of the model
with init_empty_weights():
UpperCAmelCase_ : List[str] = deepcopy(__lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCAmelCase_ : int = find_tied_parameters(__lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowercase , __lowercase ):
UpperCAmelCase_ : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase_ : Optional[int] = sum(__lowercase , [] )
UpperCAmelCase_ : Optional[Any] = len(__lowercase ) > 0
# Check if it is a base model
UpperCAmelCase_ : Union[str, Any] = False
if hasattr(__lowercase , '''base_model_prefix''' ):
UpperCAmelCase_ : Optional[int] = not hasattr(__lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase_ : Optional[Any] = list(model.named_children() )
UpperCAmelCase_ : int = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase_ : int = set(__lowercase ) - set(__lowercase )
UpperCAmelCase_ : List[str] = list(set(__lowercase ) ) + list(__lowercase )
# remove ".weight" from the keys
UpperCAmelCase_ : Optional[Any] = ['''.weight''', '''.bias''']
UpperCAmelCase_ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase_ : List[str] = name.replace(__lowercase , '''''' )
filtered_module_names.append(__lowercase )
return filtered_module_names
def snake_case_ ( __lowercase ):
for m in model.modules():
if isinstance(__lowercase , bnb.nn.Linearabit ):
return True
return False
def snake_case_ ( __lowercase ):
return next(parameter.parameters() ).device
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowercase , __lowercase , 0 , dtype=__lowercase , value=__lowercase )
UpperCAmelCase_ : Optional[int] = param_name
UpperCAmelCase_ : Optional[int] = model
if "." in tensor_name:
UpperCAmelCase_ : int = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCAmelCase_ : Any = getattr(__lowercase , __lowercase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
UpperCAmelCase_ : Any = new_module
UpperCAmelCase_ : int = splits[-1]
# offload weights
UpperCAmelCase_ : Any = False
offload_weight(module._parameters[tensor_name] , __lowercase , __lowercase , index=__lowercase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowercase , index=__lowercase , )
else:
offload_weight(__lowercase , __lowercase , __lowercase , index=__lowercase )
offload_weight(__lowercase , param_name.replace('''weight''' , '''SCB''' ) , __lowercase , index=__lowercase )
set_module_tensor_to_device(__lowercase , __lowercase , '''meta''' , dtype=__lowercase , value=torch.empty(*param.size() ) )
| 714
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 641
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : str , __snake_case : int , __snake_case : List[str] ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCAmelCase_ : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Union[str, Any] , __snake_case : int = 1 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : float = 0.0 , __snake_case : int = 50 , __snake_case : Optional[bool] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __snake_case ):
UpperCAmelCase_ : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
UpperCAmelCase_ : List[str] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCAmelCase_ : Dict = randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_ : Optional[Any] = self.unet(__snake_case , __snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(
__snake_case , __snake_case , __snake_case , eta=__snake_case , use_clipped_model_output=__snake_case , generator=__snake_case ).prev_sample
UpperCAmelCase_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Optional[Any] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 715
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
# Initialise PyTorch model
UpperCAmelCase_ : str = MobileBertConfig.from_json_file(__lowercase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ : int = MobileBertForPreTraining(__lowercase )
# Load weights from tf checkpoint
UpperCAmelCase_ : Dict = load_tf_weights_in_mobilebert(__lowercase , __lowercase , __lowercase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 716
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = data
UpperCAmelCase_ : List[Any] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ : Union[str, Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.padding()
UpperCAmelCase_ : str = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ : Any = self.expand_block(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ : Optional[Any] = (b & c) | ((~b) & d)
UpperCAmelCase_ : Optional[Any] = 0X5a_82_79_99
elif 20 <= i < 40:
UpperCAmelCase_ : List[Any] = b ^ c ^ d
UpperCAmelCase_ : str = 0X6e_d9_eb_a1
elif 40 <= i < 60:
UpperCAmelCase_ : str = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ : Optional[int] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
UpperCAmelCase_ : Union[str, Any] = b ^ c ^ d
UpperCAmelCase_ : Dict = 0Xca_62_c1_d6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
UpperCAmelCase_ : Optional[Any] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case_ ( ):
UpperCAmelCase_ : Tuple = B'''Test String'''
assert SHAaHash(__lowercase ).final_hash() == hashlib.shaa(__lowercase ).hexdigest() # noqa: S324
def snake_case_ ( ):
UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
else:
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
print(SHAaHash(__lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 641
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , __snake_case : int , __snake_case : Dict=7 , __snake_case : Any=3 , __snake_case : Dict=18 , __snake_case : Optional[Any]=30 , __snake_case : Tuple=400 , __snake_case : List[str]=True , __snake_case : Tuple=None , __snake_case : Tuple=True , __snake_case : Dict=False , __snake_case : Any=True , __snake_case : Optional[Any]=True , __snake_case : Dict=[0.5, 0.5, 0.5] , __snake_case : int=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Optional[Any] = min_resolution
UpperCAmelCase_ : Union[str, Any] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Optional[Any] = size if size is not None else {'''height''': 18, '''width''': 20}
UpperCAmelCase_ : Dict = do_thumbnail
UpperCAmelCase_ : int = do_align_axis
UpperCAmelCase_ : Dict = do_pad
UpperCAmelCase_ : Optional[int] = do_normalize
UpperCAmelCase_ : List[str] = image_mean
UpperCAmelCase_ : Any = image_std
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
self.assertTrue(hasattr(__snake_case , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__snake_case , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__snake_case , '''do_pad''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
UpperCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
UpperCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
pass
@is_flaky()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Optional[Any] = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : str = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : str = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
| 641
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def snake_case_ ( __lowercase : List[Any] ):
UpperCAmelCase_ : Tuple = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def snake_case_ ( __lowercase : List[str] ):
UpperCAmelCase_ : Any = emb.weight.shape
UpperCAmelCase_ : str = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
UpperCAmelCase_ : int = emb.weight.data
return lin_layer
def snake_case_ ( __lowercase : Any , __lowercase : Any=None ):
UpperCAmelCase_ : Optional[Any] = {}
for old_key in state_dict.keys():
UpperCAmelCase_ : Optional[Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase_ : Tuple = key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
UpperCAmelCase_ : Union[str, Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
UpperCAmelCase_ : Dict = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
UpperCAmelCase_ : Optional[int] = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
UpperCAmelCase_ : Any = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
UpperCAmelCase_ : Optional[int] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
UpperCAmelCase_ : Optional[int] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
UpperCAmelCase_ : Union[str, Any] = state_dict[old_key]
return new_dict
def snake_case_ ( __lowercase : int , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : Any = WEIGHTS_NAME ):
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : List[Any] = 0
os.makedirs(__lowercase , exist_ok=__lowercase )
for expert in range(__lowercase ):
UpperCAmelCase_ : str = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__lowercase ):
UpperCAmelCase_ : Optional[int] = torch.load(__lowercase )['''model''']
remove_ignore_keys_(__lowercase )
UpperCAmelCase_ : int = rename_fairseq_keys(__lowercase , __lowercase )
UpperCAmelCase_ : Dict = os.path.join(
__lowercase , weights_name.replace('''.bin''' , F'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
torch.save(__lowercase , __lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowercase )[0]].dtype )
# Add the last block
UpperCAmelCase_ : Dict = os.path.join(__lowercase , weights_name.replace('''.bin''' , F'''-{len(__lowercase )+1:05d}-of-???.bin''' ) )
UpperCAmelCase_ : Tuple = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__lowercase )
UpperCAmelCase_ : Tuple = rename_fairseq_keys(__lowercase , __lowercase )
UpperCAmelCase_ : Tuple = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowercase ) == 1:
UpperCAmelCase_ : Any = os.path.join(__lowercase , __lowercase )
torch.save(__lowercase , __lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowercase , __lowercase )
# Otherwise, let's build the index
UpperCAmelCase_ : Optional[Any] = {}
for idx, shard in enumerate(__lowercase ):
UpperCAmelCase_ : Tuple = weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' )
UpperCAmelCase_ : int = os.path.join(__lowercase , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) )
for key in shard:
UpperCAmelCase_ : int = shard_file
# Add the metadata
UpperCAmelCase_ : List[Any] = {'''total_size''': total_size}
UpperCAmelCase_ : Any = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__lowercase , __lowercase ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Union[str, Any] = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '''\n'''
f.write(__lowercase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__UpperCamelCase : int = parser.parse_args()
__UpperCamelCase : Dict = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase : Optional[int] = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase : List[Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 718
|
import math
import qiskit
def snake_case_ ( __lowercase = 1 , __lowercase = 1 , __lowercase = 1 ):
if (
isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
UpperCAmelCase_ : Any = qiskit.QuantumRegister(4 , '''qr''' )
UpperCAmelCase_ : List[str] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
UpperCAmelCase_ : Any = [input_a, input_a, carry_in]
UpperCAmelCase_ : Dict = qiskit.QuantumCircuit(__lowercase , __lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __lowercase ) # measure the last two qbits
UpperCAmelCase_ : Optional[int] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 641
| 0
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__UpperCamelCase : Union[str, Any] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Any = FlaxBertModel(__snake_case )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCAmelCase_ : str = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case , repo_id='''test-model-flax''' , push_to_hub=__snake_case , use_auth_token=self._token )
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : int = FlaxBertModel(__snake_case )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Any = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__snake_case , use_auth_token=self._token )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__snake_case , 1E-3 , msg=f'''{key} not identical''' )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = flatten_dict(modela.params )
UpperCAmelCase_ : Union[str, Any] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase_ : List[str] = False
return models_are_equal
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(__snake_case )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__snake_case , __snake_case ) )
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertTrue(check_models_equal(__snake_case , __snake_case ) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : List[Any] = FlaxBertModel(__snake_case )
UpperCAmelCase_ : List[str] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__snake_case , __snake_case ) , max_shard_size='''10KB''' )
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertTrue(check_models_equal(__snake_case , __snake_case ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''bert'''
UpperCAmelCase_ : Any = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(__snake_case , subfolder=__snake_case )
self.assertIsNotNone(__snake_case )
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
| 641
| 0
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__UpperCamelCase : int = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : List[Any] , *__snake_case : List[str] , **__snake_case : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 720
|
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 641
| 0
|
from torch import nn
def snake_case_ ( __lowercase ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
| 721
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 641
| 0
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = 'laion/clap-htsat-unfused'
lowercase__ = tempfile.mkdtemp()
def UpperCAmelCase ( self : Dict , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , **lowerCAmelCase : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_feature_extractor()
lowercase__ = ClapProcessor(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowercase__ = ClapProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , lowerCAmelCase)
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
lowercase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
lowercase__ = self.get_feature_extractor(do_normalize=lowerCAmelCase , padding_value=1.0)
lowercase__ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowerCAmelCase)
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = ClapProcessor(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase)
lowercase__ = floats_list((3, 10_00))
lowercase__ = feature_extractor(lowerCAmelCase , return_tensors='np')
lowercase__ = processor(audios=lowerCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = ClapProcessor(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase)
lowercase__ = 'This is a test string'
lowercase__ = processor(text=lowerCAmelCase)
lowercase__ = tokenizer(lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = ClapProcessor(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase)
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(lowerCAmelCase)
lowercase__ = tokenizer.batch_decode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = ClapProcessor(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase)
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=13 , lowerCAmelCase : List[str]=7 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : str=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : Any=32 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=4 , lowerCAmelCase : str=37 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=5_12 , lowerCAmelCase : Any=16 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : Any=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=0 , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = projection_dim
def UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
lowercase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : Dict) -> Any:
"""simple docstring"""
lowercase__ = TFDPRContextEncoder(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFDPRQuestionEncoder(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFDPRReader(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
A : Optional[Any] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
A : Any = False
A : Union[str, Any] = False
A : Dict = False
A : Tuple = False
A : Any = False
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFDPRModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37)
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFDPRContextEncoder.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFDPRContextEncoder.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFDPRQuestionEncoder.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFDPRReader.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
@require_tf
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
lowercase__ = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
lowercase__ = model(lowerCAmelCase)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowercase__ = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4))
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 642
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = "microsoft/speecht5_tts"
A : Dict = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
A : str = "text_reader"
A : Dict = SpeechTaProcessor
A : Dict = SpeechTaForTextToSpeech
A : Union[str, Any] = SpeechTaHifiGan
A : Dict = ["text"]
A : int = ["audio"]
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
if self.post_processor is None:
lowercase__ = 'microsoft/speecht5_hifigan'
super().setup()
def UpperCAmelCase ( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]=None) -> int:
"""simple docstring"""
lowercase__ = self.pre_processor(text=lowerCAmelCase , return_tensors='pt' , truncation=lowerCAmelCase)
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')
lowercase__ = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation')
lowercase__ = torch.tensor(embeddings_dataset[73_05]['xvector']).unsqueeze(0)
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> Dict:
"""simple docstring"""
with torch.no_grad():
return self.post_processor(lowerCAmelCase).cpu().detach()
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
| 1
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = (KDPMaDiscreteScheduler,)
A : str = 10
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 11_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(prediction_type='v_prediction')
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(self.num_inference_steps)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ = sample.to(lowerCAmelCase)
for i, t in enumerate(scheduler.timesteps):
lowercase__ = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = output.prev_sample
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7) < 1E-2
assert abs(result_mean.item() - 0.00_02) < 1E-3
def UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
if torch_device == "mps":
return
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(self.num_inference_steps)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ = sample.to(lowerCAmelCase)
for i, t in enumerate(scheduler.timesteps):
lowercase__ = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = output.prev_sample
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
if torch_device == "mps":
return
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter.to(lowerCAmelCase) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase__ = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = output.prev_sample
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
if str(lowerCAmelCase).startswith('cpu'):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
| 1
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Dict=13 , lowerCAmelCase : str=7 , lowerCAmelCase : Dict=True , lowerCAmelCase : str=True , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=99 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Union[str, Any]=5 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : Union[str, Any]=64 , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Optional[int]=5_12 , lowerCAmelCase : Any=16 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : List[str]=None , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
return MPNetConfig.from_pretrained('microsoft/mpnet-base')
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = MPNetModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = MPNetForQuestionAnswering(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Any) -> int:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = MPNetForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = MPNetForMultipleChoice(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase__ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase__ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict) -> Any:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = MPNetForTokenClassification(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__), (lowercase__)) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
A : List[Any] = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
A : List[Any] = False
A : Dict = True
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
lowercase__ = MPNetModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37)
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowerCAmelCase)
@require_torch
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
lowercase__ = MPNetModel.from_pretrained('microsoft/mpnet-base')
lowercase__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
lowercase__ = model(lowerCAmelCase)[0]
lowercase__ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , lowerCAmelCase)
lowercase__ = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]])
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1E-4))
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
lowercase__ = prime_factors(A__ )
if is_square_free(A__ ):
return -1 if len(A__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = WavaVecaForSequenceClassification.from_pretrained(A__ , config=A__ )
lowercase__ = downstream_dict['projector.weight']
lowercase__ = downstream_dict['projector.bias']
lowercase__ = downstream_dict['model.post_net.linear.weight']
lowercase__ = downstream_dict['model.post_net.linear.bias']
return model
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = WavaVecaForAudioFrameClassification.from_pretrained(A__ , config=A__ )
lowercase__ = downstream_dict['model.linear.weight']
lowercase__ = downstream_dict['model.linear.bias']
return model
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = WavaVecaForXVector.from_pretrained(A__ , config=A__ )
lowercase__ = downstream_dict['connector.weight']
lowercase__ = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowercase__ = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
lowercase__ = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
lowercase__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
lowercase__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
lowercase__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
lowercase__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
lowercase__ = downstream_dict['objective.W']
return model
@torch.no_grad()
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = torch.load(A__ , map_location='cpu' )
lowercase__ = checkpoint['Downstream']
lowercase__ = WavaVecaConfig.from_pretrained(A__ )
lowercase__ = WavaVecaFeatureExtractor.from_pretrained(
A__ , return_attention_mask=A__ , do_normalize=A__ )
lowercase__ = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
lowercase__ = convert_classification(A__ , A__ , A__ )
elif arch.endswith('ForAudioFrameClassification' ):
lowercase__ = convert_diarization(A__ , A__ , A__ )
elif arch.endswith('ForXVector' ):
lowercase__ = convert_xvector(A__ , A__ , A__ )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
lowercase__ = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(A__ )
hf_model.save_pretrained(A__ )
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
a__ : Tuple = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
| 642
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a__ : str = logging.getLogger(__name__)
def _lowerCAmelCase ( A__=2 , A__=3 , A__=16 , A__ = 10 , A__ = 2 ):
def get_dataset(A__ ):
lowercase__ = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
lowercase__ = get_dataset(A__ )
lowercase__ = get_dataset(A__ )
lowercase__ = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
lowercase__ = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__=None ):
lowercase__ = []
for epoch in range(A__ ):
# Train quickly
model.train()
for batch in dataloader:
lowercase__, lowercase__ = batch
lowercase__ = model(A__ )
lowercase__ = torch.nn.functional.mse_loss(A__ , A__ )
accelerator.backward(A__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class UpperCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple) -> Tuple:
"""simple docstring"""
super().__init__()
lowercase__ = nn.Parameter(torch.randn(1))
lowercase__ = nn.Parameter(torch.randn(1))
def UpperCAmelCase ( self : Any , lowerCAmelCase : str) -> List[Any]:
"""simple docstring"""
return x * self.a + self.b
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3)
lowercase__, lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(total_limit=1 , project_dir=lowerCAmelCase , automatic_checkpoint_naming=lowerCAmelCase)
# Train baseline
lowercase__ = Accelerator(project_config=lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir)) , 1)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3)
lowercase__, lowercase__ = dummy_dataloaders()
# Train baseline
lowercase__ = Accelerator()
lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Save initial
lowercase__ = os.path.join(lowerCAmelCase , 'initial')
accelerator.save_state(lowerCAmelCase)
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
lowercase__ = train(3 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
# Train partially
set_seed(42)
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3)
lowercase__, lowercase__ = dummy_dataloaders()
lowercase__ = Accelerator()
lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
accelerator.load_state(lowerCAmelCase)
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = train(2 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Save everything
lowercase__ = os.path.join(lowerCAmelCase , 'checkpoint')
accelerator.save_state(lowerCAmelCase)
# Load everything back in and make sure all states work
accelerator.load_state(lowerCAmelCase)
test_rands += train(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3)
lowercase__, lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase)
# Train baseline
lowercase__ = Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Save initial
accelerator.save_state()
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
lowercase__ = train(3 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
# Train partially
set_seed(42)
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3)
lowercase__, lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCAmelCase)
lowercase__ = Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
accelerator.load_state(os.path.join(lowerCAmelCase , 'checkpoints' , 'checkpoint_0'))
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = train(2 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase , 'checkpoints' , 'checkpoint_1'))
test_rands += train(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
((lowercase__), (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
lowercase__ = torch.tensor([1, 2, 3])
lowercase__ = torch.tensor([2, 3, 4])
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(net.parameters())
lowercase__ = Accelerator()
with self.assertRaises(lowerCAmelCase) as ve:
accelerator.register_for_checkpointing(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = str(ve.exception)
self.assertTrue('Item at index 0' in message)
self.assertTrue('Item at index 1' in message)
self.assertFalse('Item at index 2' in message)
self.assertFalse('Item at index 3' in message)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3)
lowercase__ = torch.optim.lr_scheduler.StepLR(lowerCAmelCase , step_size=1 , gamma=0.99)
lowercase__, lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase)
# Train baseline
lowercase__ = Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Save initial
accelerator.save_state()
lowercase__ = scheduler.state_dict()
train(3 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
self.assertNotEqual(lowerCAmelCase , scheduler.state_dict())
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase , 'checkpoints' , 'checkpoint_0'))
self.assertEqual(lowerCAmelCase , scheduler.state_dict())
def UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
lowercase__ = DummyModel()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase , total_limit=2)
# Train baseline
lowercase__ = Accelerator(project_dir=lowerCAmelCase , project_config=lowerCAmelCase)
lowercase__ = accelerator.prepare(lowerCAmelCase)
# Save 3 states:
for _ in range(11):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCAmelCase , 'checkpoints' , 'checkpoint_0')))
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , 'checkpoints' , 'checkpoint_9')))
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , 'checkpoints' , 'checkpoint_10')))
@require_cuda
def UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
lowercase__ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
a__ : Tuple = "/tmp/accelerate/state_checkpointing"
a__ : List[str] = DummyModel()
a__ : Any = torch.optim.Adam(params=model.parameters(), lr=1E-3)
a__ : Any = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
a__ , a__ : List[Any] = dummy_dataloaders()
a__ : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a__ : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a__ , a__ , a__ , a__ , a__ : Any = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a__ , a__ : Union[str, Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a__ : Dict = group["params"][0].device
break
assert param_device.type == accelerator.device.type
a__ : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
a__ : List[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
a__ : Union[str, Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 642
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
| 642
| 1
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@require_torch
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
lowercase__ = pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused')
lowercase__ = load_dataset('ashraq/esc50')
lowercase__ = dataset['train']['audio'][-1]['array']
lowercase__ = audio_classifier(lowerCAmelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(lowerCAmelCase) , [{'score': 0.5_01, 'label': 'Sound of a dog'}, {'score': 0.4_99, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF')
def UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
pass
@slow
@require_torch
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
# This is an audio of a dog
lowercase__ = load_dataset('ashraq/esc50')
lowercase__ = dataset['train']['audio'][-1]['array']
lowercase__ = audio_classifier(lowerCAmelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
{'score': 0.9_99, 'label': 'Sound of a dog'},
{'score': 0.0_01, 'label': 'Sound of vaccum cleaner'},
] , )
lowercase__ = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
[
{'score': 0.9_99, 'label': 'Sound of a dog'},
{'score': 0.0_01, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
lowercase__ = audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5)
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
[
{'score': 0.9_99, 'label': 'Sound of a dog'},
{'score': 0.0_01, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF')
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
pass
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : List[Any] = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "fnet"
def __init__( self : Optional[int] , lowerCAmelCase : Optional[int]=3_20_00 , lowerCAmelCase : Optional[int]=7_68 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[Any]=30_72 , lowerCAmelCase : List[str]="gelu_new" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : int=5_12 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : Dict=1E-1_2 , lowerCAmelCase : List[str]=False , lowerCAmelCase : Optional[int]=5_12 , lowerCAmelCase : Tuple=3 , lowerCAmelCase : Any=1 , lowerCAmelCase : Tuple=2 , **lowerCAmelCase : List[str] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase)
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
lowercase__ = use_tpu_fourier_optimizations
lowercase__ = tpu_short_seq_length
| 642
|
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 1
|
import math
def _lowerCAmelCase ( A__ ):
return math.sqrt(A__ ) * math.sqrt(A__ ) == num
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
lowercase__ = n
while left <= right:
lowercase__ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowercase__ = mid - 1
else:
lowercase__ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 642
| 1
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
a__ : Dict = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def _lowerCAmelCase ( A__ , A__ ):
inspect_dataset(A__ , A__ )
lowercase__ = path + '.py'
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def _lowerCAmelCase ( A__ , A__ ):
inspect_metric(A__ , A__ )
lowercase__ = path + '.py'
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = get_dataset_config_info(A__ , config_name=A__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
with pytest.raises(A__ ):
get_dataset_config_info(A__ , config_name=A__ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = get_dataset_config_names(A__ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = get_dataset_infos(A__ )
assert list(infos.keys() ) == expected_configs
lowercase__ = expected_configs[0]
assert expected_config in infos
lowercase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = get_dataset_infos(A__ )
assert expected_config in infos
lowercase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
with pytest.raises(A__ ):
get_dataset_split_names(A__ , config_name=A__ )
| 642
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
| 642
| 1
|
from math import factorial
def _lowerCAmelCase ( A__ , A__ ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('Please enter positive integers for n and k where n >= k' )
return factorial(A__ ) // (factorial(A__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
import csv
import tweepy
# Twitter API credentials
a__ : Optional[int] = ""
a__ : Union[str, Any] = ""
a__ : str = ""
a__ : Optional[Any] = ""
def _lowerCAmelCase ( A__ ):
# authorize twitter, initialize tweepy
lowercase__ = tweepy.OAuthHandler(A__ , A__ )
auth.set_access_token(A__ , A__ )
lowercase__ = tweepy.API(A__ )
# initialize a list to hold all the tweepy Tweets
lowercase__ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowercase__ = api.user_timeline(screen_name=A__ , count=200 )
# save most recent tweets
alltweets.extend(A__ )
# save the id of the oldest tweet less one
lowercase__ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(A__ ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
lowercase__ = api.user_timeline(
screen_name=A__ , count=200 , max_id=A__ )
# save most recent tweets
alltweets.extend(A__ )
# update the id of the oldest tweet less one
lowercase__ = alltweets[-1].id - 1
print(F'''...{len(A__ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
lowercase__ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , 'w' ) as f:
lowercase__ = csv.writer(A__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(A__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _lowerCAmelCase ( ):
lowercase__ = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
lowercase__ = Image.open(requests.get(A__ , stream=A__ ).raw ).convert('RGB' )
return image
def _lowerCAmelCase ( A__ ):
lowercase__ = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = dct.pop(A__ )
lowercase__ = val
def _lowerCAmelCase ( A__ , A__ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase__ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowercase__ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
lowercase__ = torch.cat((q_bias, torch.zeros_like(A__ , requires_grad=A__ ), v_bias) )
lowercase__ = qkv_bias
def _lowerCAmelCase ( A__ ):
lowercase__ = 364 if 'coco' in model_name else 224
lowercase__ = InstructBlipVisionConfig(image_size=A__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowercase__ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase__ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowercase__ = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32_001 ).to_dict()
elif "vicuna-13b" in model_name:
lowercase__ = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32_001 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowercase__ = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
lowercase__ = InstructBlipConfig(vision_config=A__ , text_config=A__ , qformer_config=A__ )
return config, image_size
@torch.no_grad()
def _lowerCAmelCase ( A__ , A__=None , A__=False ):
lowercase__ = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
lowercase__ = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase__ = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
lowercase__, lowercase__ = get_blipa_config(A__ )
lowercase__ = InstructBlipForConditionalGeneration(A__ ).eval()
lowercase__ = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
lowercase__, lowercase__ = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowercase__ = 'cuda:1' if torch.cuda.is_available() else 'cpu'
lowercase__ = 'cuda:2' if torch.cuda.is_available() else 'cpu'
lowercase__, lowercase__, lowercase__ = load_model_and_preprocess(
name=A__ , model_type=A__ , is_eval=A__ , device=A__ )
original_model.eval()
print('Done!' )
# update state dict keys
lowercase__ = original_model.state_dict()
lowercase__ = create_rename_keys(A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase__ = state_dict.pop(A__ )
if key.startswith('Qformer.bert' ):
lowercase__ = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowercase__ = key.replace('self' , 'attention' )
if "llm_proj" in key:
lowercase__ = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
lowercase__ = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
lowercase__ = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
lowercase__ = key.replace('t5' , 'language' )
lowercase__ = val
# read in qv biases
read_in_q_v_bias(A__ , A__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(A__ , strict=A__ )
lowercase__ = load_demo_image()
lowercase__ = 'What is unusual about this image?'
# create processor
lowercase__ = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=A__ , image_std=A__ )
lowercase__ = InstructBlipProcessor(
image_processor=A__ , tokenizer=A__ , qformer_tokenizer=A__ , )
lowercase__ = processor(images=A__ , text=A__ , return_tensors='pt' ).to(A__ )
# make sure processor creates exact same pixel values
lowercase__ = vis_processors['eval'](A__ ).unsqueeze(0 ).to(A__ )
lowercase__ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A__ )
original_model.to(A__ )
hf_model.to(A__ )
with torch.no_grad():
if "vicuna" in model_name:
lowercase__ = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
lowercase__ = hf_model(**A__ ).logits
else:
lowercase__ = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
lowercase__ = tokenizer('\n' , return_tensors='pt' ).input_ids.to(A__ )
lowercase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowercase__ = hf_model(**A__ , labels=A__ ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowercase__ = 1E-4 if 'vicuna' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , A__ , atol=A__ )
print('Looks ok!' )
print('Generating with original model...' )
lowercase__ = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
lowercase__ = hf_model.generate(
**A__ , do_sample=A__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase__ = 2
print('Original generation:' , A__ )
lowercase__ = processor.batch_decode(A__ , skip_special_tokens=A__ )
lowercase__ = [text.strip() for text in output_text]
print('HF generation:' , A__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A__ )
hf_model.save_pretrained(A__ )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
a__ : Tuple = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
a__ : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
lowercase__ = TapasConfig.from_json_file(A__ )
# set absolute/relative position embeddings parameter
lowercase__ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowercase__ = TapasForQuestionAnswering(config=A__ )
elif task == "WTQ":
# run_task_main.py hparams
lowercase__ = 4
lowercase__ = True
# hparam_utils.py hparams
lowercase__ = 0.66_46_94
lowercase__ = 0.20_79_51
lowercase__ = 0.12_11_94
lowercase__ = True
lowercase__ = True
lowercase__ = False
lowercase__ = 0.0_35_25_13
lowercase__ = TapasForQuestionAnswering(config=A__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowercase__ = 4
lowercase__ = False
# hparam_utils.py hparams
lowercase__ = 36.45_19
lowercase__ = 0.90_34_21
lowercase__ = 2_22.0_88
lowercase__ = True
lowercase__ = True
lowercase__ = True
lowercase__ = 0.76_31_41
lowercase__ = TapasForQuestionAnswering(config=A__ )
elif task == "TABFACT":
lowercase__ = TapasForSequenceClassification(config=A__ )
elif task == "MLM":
lowercase__ = TapasForMaskedLM(config=A__ )
elif task == "INTERMEDIATE_PRETRAINING":
lowercase__ = TapasModel(config=A__ )
else:
raise ValueError(F'''Task {task} not supported.''' )
print(F'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(A__ , A__ , A__ )
# Save pytorch-model (weights and configuration)
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(A__ )
# Save tokenizer files
print(F'''Save tokenizer files to {pytorch_dump_path}''' )
lowercase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(A__ )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a__ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
| 1
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = checkpoint
lowercase__ = {}
lowercase__ = vae_state_dict['encoder.conv_in.weight']
lowercase__ = vae_state_dict['encoder.conv_in.bias']
lowercase__ = vae_state_dict['encoder.conv_out.weight']
lowercase__ = vae_state_dict['encoder.conv_out.bias']
lowercase__ = vae_state_dict['encoder.norm_out.weight']
lowercase__ = vae_state_dict['encoder.norm_out.bias']
lowercase__ = vae_state_dict['decoder.conv_in.weight']
lowercase__ = vae_state_dict['decoder.conv_in.bias']
lowercase__ = vae_state_dict['decoder.conv_out.weight']
lowercase__ = vae_state_dict['decoder.conv_out.bias']
lowercase__ = vae_state_dict['decoder.norm_out.weight']
lowercase__ = vae_state_dict['decoder.norm_out.bias']
lowercase__ = vae_state_dict['quant_conv.weight']
lowercase__ = vae_state_dict['quant_conv.bias']
lowercase__ = vae_state_dict['post_quant_conv.weight']
lowercase__ = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
lowercase__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
lowercase__ = {
layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(A__ )
}
# Retrieves the keys for the decoder up blocks only
lowercase__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
lowercase__ = {
layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(A__ )
}
for i in range(A__ ):
lowercase__ = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
lowercase__ = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.weight''' )
lowercase__ = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.bias''' )
lowercase__ = renew_vae_resnet_paths(A__ )
lowercase__ = {'old': F'''down.{i}.block''', 'new': F'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
lowercase__ = [key for key in vae_state_dict if 'encoder.mid.block' in key]
lowercase__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
lowercase__ = renew_vae_resnet_paths(A__ )
lowercase__ = {'old': F'''mid.block_{i}''', 'new': F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
lowercase__ = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
lowercase__ = renew_vae_attention_paths(A__ )
lowercase__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
conv_attn_to_linear(A__ )
for i in range(A__ ):
lowercase__ = num_up_blocks - 1 - i
lowercase__ = [
key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
]
if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
lowercase__ = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.weight'''
]
lowercase__ = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.bias'''
]
lowercase__ = renew_vae_resnet_paths(A__ )
lowercase__ = {'old': F'''up.{block_id}.block''', 'new': F'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
lowercase__ = [key for key in vae_state_dict if 'decoder.mid.block' in key]
lowercase__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
lowercase__ = renew_vae_resnet_paths(A__ )
lowercase__ = {'old': F'''mid.block_{i}''', 'new': F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
lowercase__ = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
lowercase__ = renew_vae_attention_paths(A__ )
lowercase__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
conv_attn_to_linear(A__ )
return new_checkpoint
def _lowerCAmelCase ( A__ , A__ , ):
# Only support V1
lowercase__ = requests.get(
' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
lowercase__ = io.BytesIO(r.content )
lowercase__ = OmegaConf.load(A__ )
lowercase__ = 512
lowercase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
lowercase__ = {}
with safe_open(A__ , framework='pt' , device='cpu' ) as f:
for key in f.keys():
lowercase__ = f.get_tensor(A__ )
else:
lowercase__ = torch.load(A__ , map_location=A__ )['state_dict']
# Convert the VAE model.
lowercase__ = create_vae_diffusers_config(A__ , image_size=A__ )
lowercase__ = custom_convert_ldm_vae_checkpoint(A__ , A__ )
lowercase__ = AutoencoderKL(**A__ )
vae.load_state_dict(A__ )
vae.save_pretrained(A__ )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
a__ : List[str] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self : Dict) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (3, 32, 1_28)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowerCAmelCase) + '\n')
lowercase__ = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 32, 'width': 1_28},
}
lowercase__ = os.path.join(self.tmpdirname , lowerCAmelCase)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : str , **lowerCAmelCase : int) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
lowercase__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)
lowercase__ = Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1))
return image_input
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowercase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
lowercase__ = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0)
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='np')
lowercase__ = processor(images=lowerCAmelCase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = 'test'
lowercase__ = processor(text=lowerCAmelCase)
lowercase__ = tokenizer(lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = 'test'
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCAmelCase , images=lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase):
processor()
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCAmelCase)
lowercase__ = tokenizer.batch_decode(lowerCAmelCase)
lowercase__ = [seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCAmelCase , images=lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def UpperCAmelCase ( self : int) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = torch.randn(1 , 27 , 38)
lowercase__ = torch.randn(1 , 27 , 5_02_57)
lowercase__ = torch.randn(1 , 27 , 3_05_22)
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642
| 1
|
from __future__ import annotations
import os
from collections.abc import Mapping
a__ : Union[str, Any] = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase : set[int] , lowerCAmelCase : Mapping[EdgeT, int]) -> None:
"""simple docstring"""
lowercase__ = vertices
lowercase__ = {
(min(lowerCAmelCase), max(lowerCAmelCase)): weight for edge, weight in edges.items()
}
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : EdgeT , lowerCAmelCase : int) -> None:
"""simple docstring"""
self.vertices.add(edge[0])
self.vertices.add(edge[1])
lowercase__ = weight
def UpperCAmelCase ( self : int) -> Graph:
"""simple docstring"""
lowercase__ = Graph({min(self.vertices)} , {})
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
while len(subgraph.vertices) < len(self.vertices):
lowercase__ = max(self.edges.values()) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowercase__ = edge
lowercase__ = weight
subgraph.add_edge(lowerCAmelCase , lowerCAmelCase)
return subgraph
def _lowerCAmelCase ( A__ = "p107_network.txt" ):
lowercase__ = os.path.abspath(os.path.dirname(A__ ) )
lowercase__ = os.path.join(A__ , A__ )
lowercase__ = {}
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
with open(A__ ) as f:
lowercase__ = f.read().strip().split('\n' )
lowercase__ = [line.split(',' ) for line in data]
for edgea in range(1 , len(A__ ) ):
for edgea in range(A__ ):
if adjaceny_matrix[edgea][edgea] != "-":
lowercase__ = int(adjaceny_matrix[edgea][edgea] )
lowercase__ = Graph(set(range(len(A__ ) ) ) , A__ )
lowercase__ = graph.prims_algorithm()
lowercase__ = sum(graph.edges.values() )
lowercase__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 642
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[str] = "▁"
a__ : Dict = {"vocab_file": "sentencepiece.bpe.model"}
a__ : List[str] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
a__ : List[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
a__ : int = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[int] = VOCAB_FILES_NAMES
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : List[str] = ["input_ids", "attention_mask"]
A : List[int] = []
A : List[int] = []
def __init__( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Dict="<s>" , lowerCAmelCase : Any="<unk>" , lowerCAmelCase : Optional[int]="<pad>" , lowerCAmelCase : str="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : Optional[Any] , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ = kwargs.get('additional_special_tokens' , [])
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase))
lowercase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ = 1
lowercase__ = len(self.sp_model)
lowercase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase)
}
lowercase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowercase__ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase__ = src_lang if src_lang is not None else 'en_XX'
lowercase__ = self.lang_code_to_id[self._src_lang]
lowercase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> None:
"""simple docstring"""
lowercase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : List[str]) -> Dict:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : List[str] , lowerCAmelCase : Dict) -> None:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ = self.sp_model.PieceToId(lowerCAmelCase)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : str , lowerCAmelCase : int) -> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = []
lowercase__ = ''
lowercase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase) + token
lowercase__ = True
lowercase__ = []
else:
current_sub_tokens.append(lowerCAmelCase)
lowercase__ = False
out_string += self.sp_model.decode(lowerCAmelCase)
return out_string.strip()
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase , 'wb') as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase)
return (out_vocab_file,)
def UpperCAmelCase ( self : int , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase)
lowercase__ = [1] * len(self.prefix_tokens)
lowercase__ = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase)) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase)) + ([0] * len(lowerCAmelCase)) + suffix_ones
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase ( self : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
lowercase__ = src_lang
lowercase__ = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
lowercase__ = self.convert_tokens_to_ids(lowerCAmelCase)
lowercase__ = tgt_lang_id
return inputs
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : Optional[Any] , ) -> BatchEncoding:
"""simple docstring"""
lowercase__ = src_lang
lowercase__ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str) -> None:
"""simple docstring"""
lowercase__ = self.lang_code_to_id[src_lang]
lowercase__ = [self.cur_lang_code_id]
lowercase__ = [self.eos_token_id]
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str) -> None:
"""simple docstring"""
lowercase__ = self.lang_code_to_id[tgt_lang]
lowercase__ = [self.cur_lang_code_id]
lowercase__ = [self.eos_token_id]
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642
| 1
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = "efficientformer"
def __init__( self : Any , lowerCAmelCase : List[int] = [3, 2, 6, 4] , lowerCAmelCase : List[int] = [48, 96, 2_24, 4_48] , lowerCAmelCase : List[bool] = [True, True, True, True] , lowerCAmelCase : int = 4_48 , lowerCAmelCase : int = 32 , lowerCAmelCase : int = 4 , lowerCAmelCase : int = 7 , lowerCAmelCase : int = 5 , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 4 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 16 , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 1 , lowerCAmelCase : bool = True , lowerCAmelCase : bool = True , lowerCAmelCase : float = 1E-5 , lowerCAmelCase : str = "gelu" , lowerCAmelCase : float = 0.02 , lowerCAmelCase : float = 1E-1_2 , lowerCAmelCase : int = 2_24 , lowerCAmelCase : float = 1E-0_5 , **lowerCAmelCase : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = hidden_sizes
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = depths
lowercase__ = mlp_expansion_ratio
lowercase__ = downsamples
lowercase__ = dim
lowercase__ = key_dim
lowercase__ = attention_ratio
lowercase__ = resolution
lowercase__ = pool_size
lowercase__ = downsample_patch_size
lowercase__ = downsample_stride
lowercase__ = downsample_pad
lowercase__ = drop_path_rate
lowercase__ = num_metaad_blocks
lowercase__ = distillation
lowercase__ = use_layer_scale
lowercase__ = layer_scale_init_value
lowercase__ = image_size
lowercase__ = batch_norm_eps
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
lowercase__ = 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase__:
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> int:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@require_torch
def UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__ = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
lowercase__ = image_classifier(lowerCAmelCase , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowerCAmelCase) , [
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}],
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}],
] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
] , )
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
lowercase__ = image_classifier(lowerCAmelCase , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(lowerCAmelCase) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
[
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
{'score': 0.3_33, 'label': ANY(lowerCAmelCase)},
],
] , )
@slow
@require_torch
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
lowercase__ = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
lowercase__ = image_classifier(lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
lowercase__ = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
lowercase__ = image_classifier(lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
lowercase__ = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(lowerCAmelCase) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 1
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = load_tool('text-to-speech')
self.tool.setup()
def UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
lowercase__ = self.tool('hey')
lowercase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]) , ))
def UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
torch.manual_seed(0)
lowercase__ = self.tool('hey')
lowercase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]) , ))
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
def _lowerCAmelCase ( A__ = 1_000 ):
lowercase__ = 2**power
lowercase__ = str(A__ )
lowercase__ = list(A__ )
lowercase__ = 0
for i in list_num:
sum_of_num += int(A__ )
return sum_of_num
if __name__ == "__main__":
a__ : Any = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a__ : Union[str, Any] = solution(power)
print("Sum of the digits is: ", result)
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 642
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
| 1
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
a__ : int = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
a__ : Optional[Any] = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
a__ : int = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
a__ : List[str] = sorted(arg_to_scheduler.keys())
a__ : Any = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class UpperCAmelCase__( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple="base" , lowerCAmelCase : Any=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(lowerCAmelCase)
lowercase__ = 0
lowercase__ = Path(self.hparams.output_dir)
lowercase__ = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
lowercase__ = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=lowerCAmelCase , **lowerCAmelCase , )
else:
lowercase__ = config
lowercase__ = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , lowerCAmelCase , lowerCAmelCase):
assert hasattr(self.config , lowerCAmelCase), f'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , lowerCAmelCase , getattr(self.hparams , lowerCAmelCase))
if tokenizer is None:
lowercase__ = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowerCAmelCase , )
else:
lowercase__ = tokenizer
lowercase__ = MODEL_MODES[mode]
if model is None:
lowercase__ = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path) , config=self.config , cache_dir=lowerCAmelCase , )
else:
lowercase__ = model
def UpperCAmelCase ( self : Dict , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_type.from_pretrained(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__ = arg_to_scheduler[self.hparams.lr_scheduler]
lowercase__ = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps())
lowercase__ = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model
lowercase__ = ['bias', 'LayerNorm.weight']
lowercase__ = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
lowercase__ = Adafactor(
lowerCAmelCase , lr=self.hparams.learning_rate , scale_parameter=lowerCAmelCase , relative_step=lowerCAmelCase)
else:
lowercase__ = AdamW(
lowerCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon)
lowercase__ = optimizer
lowercase__ = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any) -> int:
"""simple docstring"""
return self.validation_step(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Optional[int]) -> List[Any]:
"""simple docstring"""
return self.validation_end(lowerCAmelCase)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = max(1 , self.hparams.gpus) # TODO: consider num_tpu_cores
lowercase__ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase ( self : Any , lowerCAmelCase : Tuple) -> str:
"""simple docstring"""
if stage == "test":
lowercase__ = len(self.test_dataloader().dataset)
else:
lowercase__ = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=lowerCAmelCase)
lowercase__ = len(self.train_dataloader().dataset)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : bool = False) -> List[str]:
"""simple docstring"""
raise NotImplementedError('You must implement this for your task')
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
lowerCAmelCase , list(filter(lowerCAmelCase , self.hparams.model_name_or_path.split('/'))).pop() , str(self.hparams.max_seq_length) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Dict[str, Any]) -> None:
"""simple docstring"""
lowercase__ = self.output_dir.joinpath('best_tfmr')
lowercase__ = self.step_count
self.model.save_pretrained(lowerCAmelCase)
self.tokenizer.save_pretrained(lowerCAmelCase)
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any) -> int:
"""simple docstring"""
parser.add_argument(
'--model_name_or_path' , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=lowerCAmelCase , help='Pretrained config name or path if not the same as model_name')
parser.add_argument(
'--tokenizer_name' , default=lowerCAmelCase , type=lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(lowerCAmelCase).parent / 'test_run' / 'cache') , type=lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=lowerCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=lowerCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=lowerCAmelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=lowerCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=lowerCAmelCase , help='The initial learning rate for Adam.')
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=lowerCAmelCase , metavar=lowerCAmelCase , type=lowerCAmelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=lowerCAmelCase , help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon' , default=1E-8 , type=lowerCAmelCase , help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps' , default=0 , type=lowerCAmelCase , help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers' , default=4 , type=lowerCAmelCase , help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=lowerCAmelCase)
parser.add_argument('--train_batch_size' , default=32 , type=lowerCAmelCase)
parser.add_argument('--eval_batch_size' , default=32 , type=lowerCAmelCase)
parser.add_argument('--adafactor' , action='store_true')
class UpperCAmelCase__( pl.Callback ):
'''simple docstring'''
def UpperCAmelCase ( self : int , lowerCAmelCase : Any , lowerCAmelCase : List[Any]) -> Any:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class UpperCAmelCase__( pl.Callback ):
'''simple docstring'''
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(lowerCAmelCase)
class UpperCAmelCase__( pl.Callback ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = trainer.lr_schedulers[0]['scheduler']
lowercase__ = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : pl.Trainer , lowerCAmelCase : pl.LightningModule) -> List[str]:
"""simple docstring"""
rank_zero_info('***** Validation results *****')
lowercase__ = trainer.callback_metrics
# Log results
for key in sorted(lowerCAmelCase):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(lowerCAmelCase , str(metrics[key])))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : pl.Trainer , lowerCAmelCase : pl.LightningModule) -> str:
"""simple docstring"""
rank_zero_info('***** Test results *****')
lowercase__ = trainer.callback_metrics
# Log and save results to file
lowercase__ = os.path.join(pl_module.hparams.output_dir , 'test_results.txt')
with open(lowerCAmelCase , 'w') as writer:
for key in sorted(lowerCAmelCase):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(lowerCAmelCase , str(metrics[key])))
writer.write('{} = {}\n'.format(lowerCAmelCase , str(metrics[key])))
def _lowerCAmelCase ( A__ , A__ ):
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(A__ ).parent / 'test_run' / 'model_checkpoints' ) , type=A__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=A__ , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=A__ )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=A__ , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=A__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=A__ , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(A__ ).parent / 'test_run' / 'dummy-train-data' ) , type=A__ , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def _lowerCAmelCase ( A__ , A__ , A__=None , A__=True , A__=[] , A__=None , A__=None , **A__ , ):
pl.seed_everything(args.seed )
# init model
lowercase__ = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=A__ )
# add custom checkpoints
if checkpoint_callback is None:
lowercase__ = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(A__ )
if logging_callback is None:
lowercase__ = LoggingCallback()
lowercase__ = {}
if args.fpaa:
lowercase__ = 16
if args.gpus > 1:
lowercase__ = 'auto'
lowercase__ = 'ddp'
lowercase__ = args.accumulate_grad_batches
lowercase__ = None
lowercase__ = 'auto'
lowercase__ = pl.Trainer.from_argparse_args(
A__ , weights_summary=A__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=A__ , val_check_interval=1 , num_sanity_val_steps=2 , **A__ , )
if args.do_train:
trainer.fit(A__ )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
| 1
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
lowercase__ = prime_factors(A__ )
if is_square_free(A__ ):
return -1 if len(A__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
lowercase__ = prime_factors(A__ )
if is_square_free(A__ ):
return -1 if len(A__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
from __future__ import annotations
import math
def _lowerCAmelCase ( A__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
a__ : Any = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def _lowerCAmelCase ( A__ ):
if not isinstance(A__ , A__ ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
lowercase__ = []
for num in range(len(A__ ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A__ ) == n:
return list_nums
return []
def _lowerCAmelCase ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
| 642
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = F'''{sampling_rate}'''
lowercase__ = '1'
lowercase__ = 'f32le'
lowercase__ = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(A__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase__ = ffmpeg_process.communicate(A__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowercase__ = output_stream[0]
lowercase__ = np.frombuffer(A__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def _lowerCAmelCase ( A__ , A__ , A__ = "f32le" , ):
lowercase__ = F'''{sampling_rate}'''
lowercase__ = '1'
if format_for_conversion == "s16le":
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase__ = platform.system()
if system == "Linux":
lowercase__ = 'alsa'
lowercase__ = 'default'
elif system == "Darwin":
lowercase__ = 'avfoundation'
lowercase__ = ':0'
elif system == "Windows":
lowercase__ = 'dshow'
lowercase__ = 'default'
lowercase__ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase__ = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def _lowerCAmelCase ( A__ , A__ , A__ = None , A__ = None , A__ = "f32le" , ):
if stream_chunk_s is not None:
lowercase__ = stream_chunk_s
else:
lowercase__ = chunk_length_s
lowercase__ = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
lowercase__ = np.intaa
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = np.floataa
lowercase__ = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowercase__ = chunk_length_s / 6
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
lowercase__ = [stride_length_s, stride_length_s]
lowercase__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase__ = datetime.datetime.now()
lowercase__ = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
lowercase__ = np.frombuffer(item['raw'] , dtype=A__ )
lowercase__ = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowercase__ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _lowerCAmelCase ( A__ , A__ , A__ , A__ = False ):
lowercase__ = B''
lowercase__, lowercase__ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowercase__ = 0
for raw in iterator:
acc += raw
if stream and len(A__ ) < chunk_len:
lowercase__ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(A__ ) >= chunk_len:
# We are flushing the accumulator
lowercase__ = (_stride_left, stride_right)
lowercase__ = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
lowercase__ = False
yield item
lowercase__ = stride_left
lowercase__ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(A__ ) > stride_left:
lowercase__ = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowercase__ = False
yield item
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 2**24 # 16Mo
try:
with subprocess.Popen(A__ , stdout=subprocess.PIPE , bufsize=A__ ) as ffmpeg_process:
while True:
lowercase__ = ffmpeg_process.stdout.read(A__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 642
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
| 642
| 1
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
a__ : str = logging.get_logger(__name__)
def _lowerCAmelCase ( A__ , A__ ):
try:
with open(A__ , 'rb' ) as flax_state_f:
lowercase__ = from_bytes(A__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(A__ ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(A__ , A__ )
def _lowerCAmelCase ( A__ , A__ ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
lowercase__ = flatten_dict(jax.tree_util.tree_map(lambda A__ : x.dtype == jnp.bfloataa , A__ ) ).values()
if any(A__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
lowercase__ = jax.tree_util.tree_map(
lambda A__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , A__ )
lowercase__ = ''
lowercase__ = flatten_dict(A__ , sep='.' )
lowercase__ = pt_model.state_dict()
# keep track of unexpected & missing keys
lowercase__ = []
lowercase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase__ = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowercase__ = flax_key_tuple_array[:-1] + ['weight']
lowercase__ = jnp.transpose(A__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowercase__ = flax_key_tuple_array[:-1] + ['weight']
lowercase__ = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowercase__ = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(A__ ):
lowercase__ = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
lowercase__ = '.'.join(A__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowercase__ = np.asarray(A__ ) if not isinstance(A__ , np.ndarray ) else flax_tensor
lowercase__ = torch.from_numpy(A__ )
# remove from missing keys
missing_keys.remove(A__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(A__ )
pt_model.load_state_dict(A__ )
# re-transform missing_keys to list
lowercase__ = list(A__ )
if len(A__ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(A__ ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
return pt_model
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
def _lowerCAmelCase ( A__ = 1_000 ):
lowercase__, lowercase__ = 1, 1
lowercase__ = 2
while True:
lowercase__ = 0
lowercase__ = fa + fa
lowercase__, lowercase__ = fa, f
index += 1
for _ in str(A__ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 642
|
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 1
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a__ : List[str] = logging.get_logger(__name__)
def _lowerCAmelCase ( A__ ):
if isinstance(A__ , np.ndarray ):
return list(tensor.shape )
lowercase__ = tf.shape(A__ )
if tensor.shape == tf.TensorShape(A__ ):
return dynamic
lowercase__ = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(A__ )]
def _lowerCAmelCase ( A__ , A__ = None , A__ = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=A__ , name=A__ )
def _lowerCAmelCase ( A__ , A__ , A__ , A__=1E-5 , A__=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(A__ , A__ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
lowercase__, lowercase__ = tf.nn.moments(A__ , axes=[axis] , keepdims=A__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase__ = [1] * inputs.shape.rank
lowercase__ = shape_list(A__ )[axis]
lowercase__ = tf.reshape(A__ , A__ )
lowercase__ = tf.reshape(A__ , A__ )
# Compute layer normalization using the batch_normalization
# function.
lowercase__ = tf.nn.batch_normalization(
A__ , A__ , A__ , offset=A__ , scale=A__ , variance_epsilon=A__ , )
return outputs
def _lowerCAmelCase ( A__ , A__=0 , A__=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase__ = tf.shape(A__ )
lowercase__ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase__ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(A__ , A__ )
def _lowerCAmelCase ( A__ ):
if not isinstance(A__ , tf.Tensor ):
lowercase__ = tf.convert_to_tensor(A__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase__ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase__ = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase__ = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _lowerCAmelCase ( A__ , A__ , A__ = "input_ids" ):
tf.debugging.assert_less(
A__ , tf.cast(A__ , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(A__ )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase__ = [x for x in data if len(A__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
lowercase__ = np.asarray(A__ )
lowercase__ = 1
lowercase__ = np.array_split(A__ , A__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase__ = np.array_split(A__ , A__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(A__ ):
lowercase__ = chunk_data
else:
lowercase__ = data
def _lowerCAmelCase ( A__ , A__ ):
if name in group.attrs:
lowercase__ = [n.decode('utf8' ) if hasattr(A__ , 'decode' ) else n for n in group.attrs[name]]
else:
lowercase__ = []
lowercase__ = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(A__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCAmelCase ( A__ ):
def _expand_single_ad_tensor(A__ ):
if isinstance(A__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(A__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , A__ )
| 642
|
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 642
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.