"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : Dict = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class snake_case_( a__ ):
__UpperCamelCase = '''mvp'''
__UpperCamelCase = ['''past_key_values''']
__UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any]=5_0_2_6_7 , UpperCamelCase_ : Tuple=1_0_2_4 , UpperCamelCase_ : int=1_2 , UpperCamelCase_ : Optional[int]=4_0_9_6 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Dict=4_0_9_6 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : List[Any]=1_0_2_4 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : str=1 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Optional[int]=1_0_0 , UpperCamelCase_ : Dict=8_0_0 , **UpperCamelCase_ : Any , ):
lowerCAmelCase : str = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Union[str, Any] = d_model
lowerCAmelCase : List[Any] = encoder_ffn_dim
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Tuple = encoder_attention_heads
lowerCAmelCase : Any = decoder_ffn_dim
lowerCAmelCase : Optional[int] = decoder_layers
lowerCAmelCase : Any = decoder_attention_heads
lowerCAmelCase : Optional[Any] = dropout
lowerCAmelCase : Tuple = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : Union[str, Any] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : str = encoder_layerdrop
lowerCAmelCase : str = decoder_layerdrop
lowerCAmelCase : Dict = classifier_dropout
lowerCAmelCase : List[str] = use_cache
lowerCAmelCase : Optional[Any] = encoder_layers
lowerCAmelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase : Optional[Any] = use_prompt
lowerCAmelCase : Tuple = prompt_length
lowerCAmelCase : int = prompt_mid_dim
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase_ ):
lowerCAmelCase : int = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
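# Usage sketch (added for illustration; `MvpModel` ships in the same `transformers` package):
#   from transformers import MvpConfig, MvpModel
#   config = MvpConfig()      # defaults mirror the RUCAIBox/mvp checkpoint
#   model = MvpModel(config)  # randomly initialised model with this configuration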
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Ensure every custom kernel/extension file added in the setup is shipped.
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
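# Usage sketch (added; file name illustrative):
#   python check_build.py              # inspect build/lib/transformers after `python setup.py build`
#   python check_build.py --check_lib  # inspect the installed `transformers` package instead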
"""Convert RoBERTa-PreLayerNorm checkpoint."""

import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint's weights into our RobertaPreLayerNorm structure."""
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
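# Usage sketch (added; script file name illustrative):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm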
"""Convert CvT checkpoints from the original repository to the Hugging Face format."""

import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """
    The function helps in renaming embedding layer weights.

    Args:
        idx: stage number in original model
    """
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """
    The function helps in renaming attention block layers weights.

    Args:
        idx: stage number in original model
        cnt: count of blocks in each stage
    """
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    """
    Function helps in renaming cls_token weights
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """
    Function helps in renaming final classification layer
    """
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """
    Function to convert the Microsoft CvT checkpoint to a Hugging Face checkpoint.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint (.pth) to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
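# Usage sketch (added; script file name and paths illustrative):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth --pytorch_dump_folder_path ./cvt-w24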
# Test script for `Accelerator.gather_for_metrics` and distributed metric computation.
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
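# Note (added): run this through `accelerate launch test_metrics.py` (or under xla_spawn on
# TPU, which calls `_mp_fn`) so that `gather_for_metrics` is exercised across processes.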
"""Mosaic data augmentation for object-detection datasets (stitches 4 images into one)."""

import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-style label files and return matching image paths and corner boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # Convert (x_center, y_center, width, height) to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Place 4 randomly chosen images on one canvas and rescale their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
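# Note (added): annotation files are expected in YOLO format, one object per line —
# "<class> <x_center> <y_center> <width> <height>" with coordinates normalised to [0, 1];
# `get_dataset` converts these to corner boxes and `main` writes mosaics back in the same format.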
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
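# Note (added): `sherman_morrison` is invoked on A^(-1) and applies the identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# turning a rank-1 update of a known inverse into an O(n^2) operation instead of a
# fresh O(n^3) inversion.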
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Random level from [1, self.max_level]; higher levels are less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        # Nodes which refer or should refer to the output node.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value

        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
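# Note (added): with promotion probability p and a cap of max_level, search, insert and
# delete all run in expected O(log n) time; `_locate_node` additionally returns the
# per-level predecessors (`update_vector`) so insert/delete can rewire forward pointers.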
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
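# Usage sketch (added): run from the repository root and redirect stdout, e.g.
#   python scripts/build_directory_md.py > DIRECTORY.md
# (the script only prints the index; it never writes a file itself).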
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered ways to reach `target` using elements of `array` (plain recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoised top-down with a dp_array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, iterative bottom-up DP."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
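# Note (added): the bottom-up variant runs in O(n * target) time and O(target) space, while
# the plain recursion is exponential; order matters here, so [1, 2, 2] and [2, 1, 2] are
# counted separately, as in the classic "Combination Sum IV" problem.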
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
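# Note (added): swapping the module object in `sys.modules` for a `_LazyModule` defers the
# torch-dependent imports until an attribute is first accessed, keeping `import transformers`
# cheap when the optional backend is missing or unused.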
def partition(m: int) -> int:
    """Count the partitions of the integer m via bottom-up dynamic programming."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files.'} )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        }, )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
    mask_ratio: float = field(
        default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
    norm_pix_loss: bool = field(
        default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )


def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {'pixel_values': pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''')
            config.update_from_string(model_args.config_overrides)
            logger.info(F'''New config: {config}''')
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info('Training new model from scratch')
        model = ViTMAEForPreTraining(config)
if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ] )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds['train'].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds['validation'].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 315 | 0 |
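# Illustrative launch command for the script above (my addition; the flags map to
# the dataclass fields and standard HF TrainingArguments, the values are examples only):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss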
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 352 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, emb_loss, info = model.encode(x)
    print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}')
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit('.', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params', {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='cpu')
        global_step = pl_sd['global_step']
        print(f'loaded model from global step {global_step}.')
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']
    return model, global_step
| 9 |
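# Usage sketch for the config-driven factory above (my addition; the target/params
# values are illustrative, not from the original file):
example_config = {
    "target": "torch.nn.Linear",
    "params": {"in_features": 8, "out_features": 4},
}
layer = instantiate_from_config(example_config)  # equivalent to torch.nn.Linear(8, 4)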
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models', )
        parser.add_argument(
            '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name' )
        parser.add_argument(
            '--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name', )
        parser.add_argument(
            '--cache_dir', default=str(Path(root_dir).parent / 'test_run' / 'cache'), type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co', )
        parser.add_argument(
            '--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--dropout', type=float, help='Dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config', )
        parser.add_argument('--learning_rate', default=5e-5, type=float, help='The initial learning rate for Adam.')
        parser.add_argument(
            '--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler', )
        parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
        parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
        parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
        parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
        parser.add_argument('--num_train_epochs', dest='max_epochs', default=3, type=int)
        parser.add_argument('--train_batch_size', default=32, type=int)
        parser.add_argument('--eval_batch_size', default=32, type=int)
        parser.add_argument('--adafactor', action='store_true')
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether the model parameters received gradients
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info('***** Validation results *****')
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info('***** Test results *****')
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, 'test_results.txt')
        with open(output_test_results_file, 'w') as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))
                    writer.write('{} = {}\n'.format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '--output_dir', default=str(Path(root_dir).parent / 'test_run' / 'model_checkpoints'), type=str, help='The output directory where the model predictions and checkpoints will be written.', )
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O2', help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            'See details at https://nvidia.github.io/apex/amp.html'
        ), )
    parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
    parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_predict', action='store_true', help='Whether to run predictions on the test set.')
    parser.add_argument(
        '--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument(
        '--data_dir', default=str(Path(root_dir).parent / 'test_run' / 'dummy-train-data'), type=str, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.', )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix='checkpoint', monitor='val_loss', mode='min', save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params['precision'] = 16

    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'

    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'

    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )

    if args.do_train:
        trainer.fit(model)

    else:
        print('RAG modeling tests with new set functions successfully executed!')
    return trainer
| 147 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
        'MT5EncoderModel',
        'MT5ForConditionalGeneration',
        'MT5ForQuestionAnswering',
        'MT5Model',
        'MT5PreTrainedModel',
        'MT5Stack',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MT5Tokenizer, 'MT5TokenizerFast': MT5TokenizerFast},
        module_spec=__spec__,
    )
| 311 |
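# Minimal sketch of the lazy-import idea used above (my simplification, not the
# actual transformers._LazyModule): attribute access triggers the real submodule import.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is first accessed.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)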
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            'sep': self.sep,
            'header': self.header,
            'names': self.names,
            'index_col': self.index_col,
            'usecols': self.usecols,
            'prefix': self.prefix,
            'mangle_dupe_cols': self.mangle_dupe_cols,
            'engine': self.engine,
            'converters': self.converters,
            'true_values': self.true_values,
            'false_values': self.false_values,
            'skipinitialspace': self.skipinitialspace,
            'skiprows': self.skiprows,
            'nrows': self.nrows,
            'na_values': self.na_values,
            'keep_default_na': self.keep_default_na,
            'na_filter': self.na_filter,
            'verbose': self.verbose,
            'skip_blank_lines': self.skip_blank_lines,
            'thousands': self.thousands,
            'decimal': self.decimal,
            'lineterminator': self.lineterminator,
            'quotechar': self.quotechar,
            'quoting': self.quoting,
            'escapechar': self.escapechar,
            'comment': self.comment,
            'encoding': self.encoding,
            'dialect': self.dialect,
            'error_bad_lines': self.error_bad_lines,
            'warn_bad_lines': self.warn_bad_lines,
            'skipfooter': self.skipfooter,
            'doublequote': self.doublequote,
            'memory_map': self.memory_map,
            'float_precision': self.float_precision,
            'chunksize': self.chunksize,
            'encoding_errors': self.encoding_errors,
            'on_bad_lines': self.on_bad_lines,
            'date_format': self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                raise
| 311 | 1 |
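# Typical entry point for the builder above (my addition): users never instantiate
# Csv directly; `load_dataset` forwards extra keyword arguments into CsvConfig,
# which in turn become pandas.read_csv kwargs. The file path here is illustrative.
from datasets import load_dataset

dataset = load_dataset("csv", data_files={"train": "train.csv"}, sep=";", quotechar="'")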
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Sum the numbers below ``n`` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 93 |
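# Worked example for the double-base check above (my addition): 585 is a decimal
# palindrome and 1001001001 in binary, which is also a palindrome, so it is counted.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])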
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. There has to
    be exactly one such class in the loaded module.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.' )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f'Defaulting to latest_version: {revision}.')
        elif revision in available_versions:
            revision = f'v{revision}'
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f" {', '.join(available_versions + ['main'])}." )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
| 93 | 1 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected number of times the letter should appear
                    # based on the letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add this letter's contribution to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected number of times the letter should appear
                    # based on the letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add this letter's contribution to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
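# A minimal usage sketch (not part of the original module): the phrase and
# shift below are illustrative. Encrypting "the quick brown fox" with a
# Caesar shift of 10 gives "dro aesmu lbygx pyh"; the chi-squared search over
# all 26 shifts should recover the plaintext with the lowest score.
if __name__ == "__main__":
    best_shift, best_chi_squared, best_plaintext = decrypt_caesar_with_chi_squared(
        "dro aesmu lbygx pyh"
    )
    print(best_shift, round(best_chi_squared, 3), best_plaintext)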
| 361 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
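# Hedged usage sketch (not from the original file): "zero-shot-object-detection"
# is the task string registered for this pipeline, and the OWL-ViT checkpoint
# below is one public model that supports it.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    for prediction in predictions:
        print(prediction["label"], round(prediction["score"], 3), prediction["box"])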
| 147 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)
    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)
    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def UpperCAmelCase_ ( self :List[str] )-> Tuple:
A__ = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
A__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
A__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
A__ = model.generate(lowercase_ , max_length=2_00 , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_ )
| 237 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        # Total of each resource type currently allocated across all processes
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self) -> list[int]:
        # Resources still free: the claim vector minus everything allocated
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )
    def __need(self) -> list[list[int]]:
        # Remaining need of each process: maximum claim minus current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        # Map each process index to its need vector
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
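    # Hedged usage sketch appended to the demo block (not in the original
    # module): run the safety algorithm on the sample tables above.
    # `describe=True` is an arbitrary truthy keyword; `main` only checks
    # `val is True` to decide whether to print the tables first.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)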
| 237 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
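# Hedged usage sketch (not part of the original class): train the wrapper on a
# tiny in-memory corpus. The corpus and vocab size below are made-up values.
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train_from_iterator(
        ["Hello world", "Hello tokenizers", "Unigram models pick subwords"],
        vocab_size=60,
    )
    print(tokenizer.encode("Hello world").tokens)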
| 202 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : List[str] = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : Union[str, Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : List[Any] = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Union[str, Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_UpperCAmelCase : Optional[int] = {"unk_token": "<unk>"}
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
def snake_case_ ( self : List[Any] , **A : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : int , **A : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : List[str] , **A : Optional[Any] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : str ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_UpperCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Any = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Any = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : str = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : Any = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = image_processor(A , return_tensors="np" )
_UpperCAmelCase : Any = processor(images=A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : str ):
_UpperCAmelCase : Tuple = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Optional[int] = "lower newer"
_UpperCAmelCase : Union[str, Any] = processor(text=A )
_UpperCAmelCase : Optional[int] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : str = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def snake_case_ ( self : int ):
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : List[str] = processor.batch_decode(A )
_UpperCAmelCase : int = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : int = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = "lower newer"
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Optional[Any] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 202 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = (
    "transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it"
    " requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation"
    " instructions."
)
class ConvertCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.", )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name", type=str, default=None, help="Optional fine-tuning task name if the TF model was a finetuned model.", )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
__SCREAMING_SNAKE_CASE : Dict = self._tf_checkpoint
__SCREAMING_SNAKE_CASE : int = ''''''
else:
__SCREAMING_SNAKE_CASE : int = self._tf_checkpoint
__SCREAMING_SNAKE_CASE : List[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase__ , self._config , self._pytorch_dump_output , lowerCAmelCase__ )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]")
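# Hedged usage sketch (not in the original file): the same conversion the
# `transformers-cli convert` subcommand performs can be driven
# programmatically. Every path below is a placeholder.
if __name__ == "__main__":
    ConvertCommand(
        model_type="bert",
        tf_checkpoint="/path/to/bert_model.ckpt",
        pytorch_dump_output="/path/to/pytorch_model",
        config="/path/to/bert_config.json",
        finetuning_task_name=None,
    ).run()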
| 9 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
lowercase__ = """openai/whisper-base"""
lowercase__ = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
lowercase__ = """transcriber"""
lowercase__ = WhisperProcessor
lowercase__ = WhisperForConditionalGeneration
lowercase__ = ["""audio"""]
lowercase__ = ["""text"""]
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features
    def forward(self, inputs):
        return self.model.generate(inputs=inputs)
    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
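# Hedged usage sketch (not part of the original file): PipelineTool instances
# are callable; the one-second silent clip here is a stand-in for real audio,
# and the first call downloads the default Whisper checkpoint.
if __name__ == "__main__":
    import numpy as np

    tool = SpeechToTextTool()
    silence = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
    print(tool(silence))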
| 83 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
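# For example, the default `attention_types=[[["global", "local"], 12]]`
# expands to 24 alternating entries:
#     ["global", "local", "global", "local", ..., "global", "local"]
# which must line up one-to-one with `num_layers` (the check in `__init__`
# above enforces exactly that).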
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute block length and number of blocks in a
    way that can be exported to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
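# A quick self-check sketch (not in the original file): `custom_unfold` is
# meant to mirror `torch.Tensor.unfold` so the model can be exported to ONNX.
# The tensor shape below is arbitrary.
if __name__ == "__main__":
    import torch

    x = torch.arange(12, dtype=torch.float32).reshape(1, 12)
    assert torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))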
| 354 |
"""simple docstring"""
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
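    # Hedged cross-check sketch (not in the original file): all four
    # implementations must agree on arbitrary strings, not just test_data.
    import random
    import string

    for _ in range(100):
        s = "".join(random.choices(string.ascii_lowercase, k=random.randint(0, 8)))
        assert (
            is_palindrome(s)
            == is_palindrome_traversal(s)
            == is_palindrome_recursive(s)
            == is_palindrome_slice(s)
        )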
| 40 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> int:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
def __UpperCAmelCase ( self ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
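# Hedged note (not in the original file): with this pattern,
# `import transformers.models.vit_msn` stays cheap because the module object
# is replaced by a `_LazyModule`; the torch-backed classes are only imported
# on first attribute access, e.g.:
#
#     from transformers.models import vit_msn
#     vit_msn.ViTMSNConfig   # loads configuration_vit_msn
#     vit_msn.ViTMSNModel    # loads modeling_vit_msn (requires torch)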
| 29 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_snake_case : List[Any] = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def __snake_case ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
__snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__snake_case : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case : Union[str, Any] = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __snake_case ( self : int ) -> Union[str, Any]:
# Initialize image_processor
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__snake_case : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
__snake_case : List[str] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
__snake_case : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
__snake_case : Union[str, Any] = 'Hello'
__snake_case : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case : Union[str, Any] = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __snake_case ( self : int ) -> Dict:
# Initialize image_processor
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
__snake_case : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case : str = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __snake_case ( self : Union[str, Any] ) -> int:
# Initialize image_processor
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__snake_case : Union[str, Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case : Union[str, Any] = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class a (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def __snake_case ( self : Tuple ) -> Any:
__snake_case : Tuple = PixaStructImageProcessingTester(self , num_channels=4 )
__snake_case : Tuple = 3
@property
def __snake_case ( self : Dict ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Union[str, Any] ) -> Tuple:
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def __snake_case ( self : Tuple ) -> Optional[int]:
# Initialize image_processor
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__snake_case : int = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case : Optional[Any] = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 369 |
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
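# Cross-check sketch: the standard library yields the same 3-element subsets in
# the same lexicographic order for sorted input, so the recursive routine can be
# validated against it:
#   from itertools import combinations
#   assert list(combinations([10, 20, 30, 40, 50], 3))[0] == (10, 20, 30)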
| 134 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Tuple=37 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Union[str, Any]=5_12 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]="None" , _UpperCAmelCase : str=3 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Optional[Any]=None , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = relative_attention
UpperCAmelCase__ = position_biased_input
UpperCAmelCase__ = pos_att_type
UpperCAmelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = TFDebertaVaModel(config=_UpperCAmelCase )
UpperCAmelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = TFDebertaVaForMaskedLM(config=_UpperCAmelCase )
UpperCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDebertaVaForSequenceClassification(config=_UpperCAmelCase )
UpperCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDebertaVaForTokenClassification(config=_UpperCAmelCase )
UpperCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = TFDebertaVaForQuestionAnswering(config=_UpperCAmelCase )
UpperCAmelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = config_and_inputs
UpperCAmelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Tuple = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = TFDebertaVaModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(_UpperCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
UpperCAmelCase__ = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
UpperCAmelCase__ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
UpperCAmelCase__ = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 )
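        # The integration check above pins only a small, stable 3x3 slice of the
        # hidden states against hard-coded values; a generic sketch of the same
        # pattern (values hypothetical):
        #   expected = tf.constant([[[0.2356, 0.1948, 0.0369], ...]])
        #   tf.debugging.assert_near(output[:, 1:4, 1:4], expected, atol=1e-4)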
| 346 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n            NOTE: Perplexity can only be calculated for causal language models.\n                    This includes models such as gpt2, causal variations of bert,\n                    causal versions of t5, and more (the full list can be found\n                    in the AutoModelForCausalLM documentation here:\n                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ):
"""simple docstring"""
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, gpu or cuda."
if device == "gpu":
UpperCAmelCase__ = """cuda"""
else:
UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase )
UpperCAmelCase__ = model.to(_UpperCAmelCase )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase )
UpperCAmelCase__ = encodings["""input_ids"""]
UpperCAmelCase__ = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ):
UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCAmelCase__ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_UpperCAmelCase ), attn_mask] , dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
            UpperCAmelCase__ = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
| 346 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_A = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
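# Instantiation still works through the deprecated name and simply emits the
# warning above, e.g. (checkpoint name assumed):
#   feature_extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-patch16-224")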
| 362 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
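# Example CLI invocation through the registered subcommand (model name hypothetical):
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased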
| 212 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
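# A minimal sketch of the lazy-import idea, using module-level __getattr__
# (PEP 562); the real _LazyModule is more involved, so treat this as an
# assumption-level illustration only:
#   import importlib
#   def __getattr__(name):
#       for module, symbols in _import_structure.items():
#           if name in symbols:
#               return getattr(importlib.import_module(f".{module}", __name__), name)
#       raise AttributeError(name)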
| 71 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
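# Round-trip sketch (class/method names as reconstructed above; with the default
# zero-mean / unit-std parameters, scale followed by unscale is the identity):
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)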
| 130 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
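# Minimal sanity check (hypothetical values, not part of the training flow):
# a 3 s clip at 16 kHz cropped with max_length=1.0 yields exactly 16_000 samples:
#   assert len(random_subsample(np.zeros(48_000), max_length=1.0)) == 16_000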
@dataclass
class __a :
__lowercase : Optional[str] = field(default=__UpperCamelCase , metadata={'help': 'Name of a dataset from the datasets package'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'A file containing the training audio paths and labels.'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'A file containing the validation audio paths and labels.'} )
__lowercase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
__lowercase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
__lowercase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
__lowercase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__lowercase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class __a :
__lowercase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : Optional[bool] = field(
default=__UpperCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder`'
'instead. Setting `freeze_feature_encoder==True`.' , lowerCAmelCase__ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__: int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__: int = training_args.get_process_log_level()
logger.setLevel(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase__: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__: str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
lowercase__: Optional[Any] = DatasetDict()
lowercase__: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__: Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--label_column_name` to the correct text column - one of '
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase__: Optional[Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase__: str = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase__: Tuple = feature_extractor.model_input_names[0]
def train_transforms(snake_case ):
lowercase__: int = []
for audio in batch[data_args.audio_column_name]:
lowercase__: List[Any] = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case )
lowercase__: Union[str, Any] = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
lowercase__: Dict = {model_input_name: inputs.get(snake_case )}
lowercase__: int = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case ):
lowercase__: Optional[int] = [audio['array'] for audio in batch[data_args.audio_column_name]]
lowercase__: Dict = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
lowercase__: str = {model_input_name: inputs.get(snake_case )}
lowercase__: Dict = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__: int = raw_datasets['train'].features[data_args.label_column_name].names
lowercase__ , lowercase__: Optional[int] = {}, {}
for i, label in enumerate(snake_case ):
lowercase__: Tuple = str(snake_case )
lowercase__: Optional[Any] = label
# Load the accuracy metric from the datasets package
lowercase__: int = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case ):
lowercase__: Dict = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case , references=eval_pred.label_ids )
lowercase__: str = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case ) , labelaid=snake_case , idalabel=snake_case , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__: Union[str, Any] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase__: List[Any] = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case , output_all_columns=snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase__: str = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case , output_all_columns=snake_case )
# Initialize our trainer
lowercase__: Any = Trainer(
model=snake_case , args=snake_case , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , )
# Training
if training_args.do_train:
lowercase__: int = None
if training_args.resume_from_checkpoint is not None:
lowercase__: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__: Union[str, Any] = last_checkpoint
lowercase__: int = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__: str = trainer.evaluate()
trainer.log_metrics('eval' , snake_case )
trainer.save_metrics('eval' , snake_case )
# Write model card and (optionally) push to hub
lowercase__: Union[str, Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
if __name__ == "__main__":
main()
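# Example invocation (flags mirror the argument classes above; dataset and paths hypothetical):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft --do_train --do_eval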
| 288 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=1 / 255 , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=True , ) -> List[str]:
'''simple docstring'''
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase__: Dict = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
lowercase__: Tuple = parent
lowercase__: Optional[Any] = batch_size
lowercase__: Any = num_channels
lowercase__: str = min_resolution
lowercase__: Dict = max_resolution
lowercase__: Any = do_resize
lowercase__: str = size
lowercase__: Any = do_rescale
lowercase__: Union[str, Any] = rescale_factor
lowercase__: Optional[int] = do_normalize
lowercase__: Union[str, Any] = image_mean
lowercase__: List[str] = image_std
lowercase__: Optional[Any] = do_pad
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
'''simple docstring'''
if not batched:
lowercase__: List[Any] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
lowercase__ , lowercase__: List[str] = image.size
else:
lowercase__ , lowercase__: str = image.shape[1], image.shape[2]
if w < h:
lowercase__: Optional[int] = int(self.size['shortest_edge'] * h / w )
lowercase__: int = self.size['shortest_edge']
elif w > h:
lowercase__: Tuple = self.size['shortest_edge']
lowercase__: int = int(self.size['shortest_edge'] * w / h )
else:
lowercase__: Tuple = self.size['shortest_edge']
lowercase__: Optional[Any] = self.size['shortest_edge']
else:
lowercase__: str = []
for image in image_inputs:
lowercase__ , lowercase__: Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__: Union[str, Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
lowercase__: Any = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
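# Worked example of the shortest-edge rule above (hypothetical 600x800 w-by-h image,
# shortest_edge=18): the short side maps to 18 and the long side scales
# proportionally, giving (height, width) = (int(18 * 800 / 600), 18) = (24, 18).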
@require_torch
@require_vision
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : Tuple = DetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Optional[int] = DetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_std' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_rescale' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'rescale_factor' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_pad' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
lowercase__: str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
# Initialize image_processing
lowercase__: Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
lowercase__: Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Tuple = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ , lowercase__: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
lowercase__: Dict = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
# Initialize image_processing
lowercase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
lowercase__: Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__: Union[str, Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
# Initialize image_processing
lowercase__: Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase__: Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__: Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
# prepare image and target
lowercase__: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowercase__: Optional[int] = json.loads(f.read() )
lowercase__: Optional[Any] = {'image_id': 39_769, 'annotations': target}
# encode them
lowercase__: Optional[Any] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
lowercase__: List[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='pt' )
# verify pixel values
lowercase__: Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase__ )
lowercase__: Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
lowercase__: List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase__ ) )
# verify boxes
lowercase__: Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase__ )
lowercase__: int = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowercase__: List[Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase__ ) )
# verify is_crowd
lowercase__: Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase__ ) )
# verify class_labels
lowercase__: List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase__ ) )
# verify orig_size
lowercase__: Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase__ ) )
# verify size
lowercase__: Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase__ ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
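# Hedged usage sketch mirroring the happy path the two tests above exercise (the model id
# and fixture layout come from the tests; the image file name here is illustrative):
#   from PIL import Image
#   from transformers import DetrImageProcessor
#   processor = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
#   encoding = processor(images=Image.open('cats.png'), return_tensors='pt')
#   encoding['pixel_values'].shape  # e.g. torch.Size([1, 3, 800, 1066])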
| 288 | 1 |
"""Count integer partitions with bottom-up dynamic programming."""


def partition(m: int) -> int:
    """Return the number of integer partitions of m."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
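# Quick self-check, worked out by hand: the seven partitions of 5 are
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
assert partition(5) == 7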
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
| 2 |
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    'good first issue',
    'feature request',
    'wip',
]


def main() -> None:
    """Close or flag issues that have gone stale on the huggingface/accelerate repo."""
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')


if __name__ == "__main__":
    main()
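# Hypothetical local invocation (needs PyGithub installed and a token able to read and
# edit issues on huggingface/accelerate; in CI the token is injected by the workflow):
#   GITHUB_TOKEN=<token> python stale.py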
| 322 | 0 |
"""Prior transformer: predicts a CLIP image embedding from a text embedding."""
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embedding_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embedding_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ])

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10_000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor: Dict):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(additional_embeds, dim=1)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10_000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
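# Hedged usage sketch (shapes follow the defaults above; the call site is assumed, not
# taken from the original file):
#   prior = PriorTransformer()
#   out = prior(
#       hidden_states=torch.randn(2, 768),
#       timestep=1,
#       proj_embedding=torch.randn(2, 768),
#       encoder_hidden_states=torch.randn(2, 77, 768),
#   )
#   out.predicted_image_embedding.shape  # torch.Size([2, 768])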
| 337 |
"""Regression test: an accelerate-prepared optimizer must survive pickling."""
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
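# Context for the test above (added note, not from the original file): Accelerator.prepare
# wraps the torch optimizer in an AcceleratedOptimizer, and the wrapper's state has to
# survive a pickle round-trip, e.g. for checkpointing workflows.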
| 337 | 1 |
"""Fast tokenizer class for SqueezeBERT."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'squeezebert/squeezebert-uncased': (
            'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
        ),
        'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
        'squeezebert/squeezebert-mnli-headless': (
            'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'squeezebert/squeezebert-uncased': (
            'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
        ),
        'squeezebert/squeezebert-mnli': (
            'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
        ),
        'squeezebert/squeezebert-mnli-headless': (
            'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'squeezebert/squeezebert-uncased': 512,
    'squeezebert/squeezebert-mnli': 512,
    'squeezebert/squeezebert-mnli-headless': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'squeezebert/squeezebert-uncased': {'do_lower_case': True},
    'squeezebert/squeezebert-mnli': {'do_lower_case': True},
    'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" SqueezeBERT tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
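# Hedged usage sketch (model id taken from the maps above; the printed ids are illustrative):
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("Hello world")
#   enc["input_ids"]  # [CLS] hello world [SEP] rendered as vocabulary ids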
| 321 |
"""Segment tree with lazy propagation: range-assign updates and range-max queries."""
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
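# Expected demo output, hand-checked against A (an added note, not captured program output):
# 7, 14, 15, 111, then
# [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8]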
| 321 | 1 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class __A :
'''simple docstring'''
__lowercase: str
__lowercase: str = None
@staticmethod
def lowerCAmelCase ( ) ->Optional[Any]:
"""simple docstring"""
raise NotImplementedError
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
raise NotImplementedError
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
raise NotImplementedError
def lowerCAmelCase ( self : List[str] ) ->Optional[int]:
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return F"""`pip install {cls.pip_package or cls.name}`"""
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = """optuna"""
@staticmethod
def lowerCAmelCase ( ) ->List[Any]:
"""simple docstring"""
return is_optuna_available()
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->Tuple:
"""simple docstring"""
return run_hp_search_optuna(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Any ) ->List[str]:
"""simple docstring"""
return default_hp_space_optuna(UpperCAmelCase_ )
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[Any] = """ray"""
__lowercase: List[str] = """'ray[tune]'"""
@staticmethod
def lowerCAmelCase ( ) ->int:
"""simple docstring"""
return is_ray_available()
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ) ->int:
"""simple docstring"""
return run_hp_search_ray(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Dict ) ->List[str]:
"""simple docstring"""
return default_hp_space_ray(UpperCAmelCase_ )
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = """sigopt"""
@staticmethod
def lowerCAmelCase ( ) ->Union[str, Any]:
"""simple docstring"""
return is_sigopt_available()
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return run_hp_search_sigopt(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
return default_hp_space_sigopt(UpperCAmelCase_ )
class __A (snake_case__):
'''simple docstring'''
__lowercase: Dict = """wandb"""
@staticmethod
def lowerCAmelCase ( ) ->List[Any]:
"""simple docstring"""
return is_wandb_available()
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
return run_hp_search_wandb(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
return default_hp_space_wandb(UpperCAmelCase_ )
__SCREAMING_SNAKE_CASE : Tuple = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def _a ( ) -> str:
snake_case_ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_SCREAMING_SNAKE_CASE ) > 0:
snake_case_ = available_backends[0].name
if len(_SCREAMING_SNAKE_CASE ) > 1:
logger.info(
f"""{len(_SCREAMING_SNAKE_CASE )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
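# Hedged sketch of how Trainer-side code is expected to consume this module (the call
# site is an assumption, not shown in this file):
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(default_hp_search_backend())]
#   backend = backend_cls()
#   backend.ensure_available()  # raises with a pip hint if the library is missing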
| 350 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__SCREAMING_SNAKE_CASE : Tuple = 16
__SCREAMING_SNAKE_CASE : int = 32
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = "bert-base-cased" ) -> Optional[Any]:
snake_case_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets["""train"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
snake_case_ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
# Initialize accelerator
snake_case_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config["""lr"""]
snake_case_ = int(config["""num_epochs"""] )
snake_case_ = int(config["""seed"""] )
snake_case_ = int(config["""batch_size"""] )
snake_case_ = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
snake_case_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case_ = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
snake_case_ = 1
snake_case_ = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case_ = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
snake_case_ = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
snake_case_ = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case_ = 0
# Now we train the model
snake_case_ = evaluate.load("""glue""" , """mrpc""" )
snake_case_ = 0
snake_case_ = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
snake_case_ = outputs.loss
snake_case_ = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
snake_case_ = 0
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case_ , snake_case_ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_SCREAMING_SNAKE_CASE ) - 1:
snake_case_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _SCREAMING_SNAKE_CASE )
snake_case_ = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
snake_case_ = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( ) -> int:
snake_case_ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=_SCREAMING_SNAKE_CASE , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_SCREAMING_SNAKE_CASE , )
parser.add_argument(
"""--output_dir""" , type=_SCREAMING_SNAKE_CASE , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=3 , help="""Number of train epochs.""" , )
snake_case_ = parser.parse_args()
snake_case_ = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
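# Hypothetical launch command (the flags exist in the argparser above; the accelerate
# config file name is an assumption):
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --performance_lower_bound 0.8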
| 233 | 0 |
"""Utilities to extract compressed archives (used by the datasets download manager)."""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out archive members whose paths or links would escape output_path.
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
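# Hedged usage sketch (the archive and output paths are placeholders):
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")
#   # likely "tar" here, since tarfile handles gzip-compressed tars transparently
#   Extractor.extract("archive.tar.gz", "extracted/archive", extractor_format=fmt)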
| 2 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
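# Hedged sketch of the helper under test (signature as exercised above; the AggregateScore
# detail is an assumption based on the rouge_score package, not this file):
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeL"])
#   scores["rouge2"].mid.fmeasure  # aggregated runs return bootstrap AggregateScore objects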
| 335 | 0 |
"""Deprecated feature-extractor shim for MobileViT."""
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
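# The shim keeps legacy imports working: constructing MobileViTFeatureExtractor emits a
# FutureWarning and otherwise behaves exactly like MobileViTImageProcessor, since the
# subclass adds no behaviour of its own.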
| 335 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , **__a ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args (thresholds applied while filtering each batch of masks)
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: int = 32,
        crop_n_points_downscale_factor: int = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
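
# A minimal usage sketch for this pipeline (the "facebook/sam-vit-base" checkpoint
# name and the example image URL are illustrative assumptions, not taken from this file):
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("mask-generation", model="facebook/sam-vit-base")
    outputs = generator(
        "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
    )
    print(len(outputs["masks"]), outputs["scores"].shape)  # one IoU score per kept mask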
| 335 | 1 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row and stack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
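
# A quick numeric self-check (a sketch; the synthetic gradient input is arbitrary):
if __name__ == "__main__":
    import numpy as np

    test_img = np.tile(np.arange(10, dtype=np.uint8) * 25, (10, 1))
    smoothed = gaussian_filter(test_img, 3, sigma=1)
    print(smoothed.shape)  # (8, 8): the valid convolution trims k_size - 1 pixels per axis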
| 103 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
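
# A short composition sketch (the "bert-base-uncased" / "gpt2" ids are illustrative):
if __name__ == "__main__":
    from transformers import AutoConfig, EncoderDecoderConfig

    encoder = AutoConfig.from_pretrained("bert-base-uncased")
    decoder = AutoConfig.from_pretrained("gpt2")
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
    print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True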
| 198 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
Blip2Config,
Blip2ForConditionalGeneration,
Blip2Processor,
Blip2VisionConfig,
BlipImageProcessor,
OPTConfig,
T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCamelCase_( ):
"""simple docstring"""
__a ='https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__a =Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('RGB' )
return image
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =[]
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
__a =dct.pop(_snake_case )
__a =val
def UpperCamelCase_( _snake_case : int , _snake_case : Any ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__a =state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
__a =state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
__a =torch.cat((q_bias, torch.zeros_like(_snake_case , requires_grad=_snake_case ), v_bias) )
__a =qkv_bias
def UpperCamelCase_( _snake_case : Dict , _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =364 if 'coco' in model_name else 224
__a =Blip2VisionConfig(image_size=_snake_case ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__a =OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_snake_case ).to_dict()
elif "opt-6.7b" in model_name:
__a =OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_snake_case ).to_dict()
elif "t5-xl" in model_name:
__a =T5Config.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__a =T5Config.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__a =Blip2Config(vision_config=_snake_case , text_config=_snake_case )
return config, image_size
@torch.no_grad()
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Union[str, Any]=None , _snake_case : Optional[int]=False ):
"""simple docstring"""
__a =(
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__a =tokenizer('\n' , add_special_tokens=_snake_case ).input_ids[0]
__a , __a =get_blipa_config(_snake_case , eos_token_id=_snake_case )
__a =Blip2ForConditionalGeneration(_snake_case ).eval()
__a ={
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__a , __a =model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__a ='cuda' if torch.cuda.is_available() else 'cpu'
__a , __a , __a =load_model_and_preprocess(
name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case )
original_model.eval()
print('Done!' )
# update state dict keys
__a =original_model.state_dict()
__a =create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__a =state_dict.pop(_snake_case )
if key.startswith('Qformer.bert' ):
__a =key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__a =key.replace('self' , 'attention' )
if "opt_proj" in key:
__a =key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__a =key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__a =key.replace('opt' , 'language' )
if key.startswith('t5' ):
__a =key.replace('t5' , 'language' )
__a =val
# read in qv biases
read_in_q_v_bias(_snake_case , _snake_case )
__a , __a =hf_model.load_state_dict(_snake_case , strict=_snake_case )
assert len(_snake_case ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__a =load_demo_image()
__a =vis_processors['eval'](_snake_case ).unsqueeze(0 ).to(_snake_case )
__a =tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_snake_case )
# create processor
__a =BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_snake_case , image_std=_snake_case )
__a =Blip2Processor(image_processor=_snake_case , tokenizer=_snake_case )
__a =processor(images=_snake_case , return_tensors='pt' ).pixel_values.to(_snake_case )
# make sure processor creates exact same pixel values
assert torch.allclose(_snake_case , _snake_case )
original_model.to(_snake_case )
hf_model.to(_snake_case )
with torch.no_grad():
if "opt" in model_name:
__a =original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__a =hf_model(_snake_case , _snake_case ).logits
else:
__a =original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__a =input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__a =hf_model(_snake_case , _snake_case , labels=_snake_case ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__a =torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_snake_case )
assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__a =torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_snake_case )
else:
# cast to same type
__a =logits.dtype
assert torch.allclose(original_logits.to(_snake_case ) , _snake_case , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__a =''
__a =tokenizer(_snake_case , return_tensors='pt' ).input_ids.to(_snake_case )
__a =original_model.generate({'image': original_pixel_values} )
__a =hf_model.generate(
_snake_case , _snake_case , do_sample=_snake_case , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _snake_case )
__a =input_ids.shape[1]
__a =processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_snake_case )
__a =[text.strip() for text in output_text]
print('HF generation:' , _snake_case )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_snake_case )
hf_model.save_pretrained(_snake_case )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
_lowerCAmelCase : List[Any] = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_lowerCAmelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
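# After a successful run, the dump folder can be reloaded through the public API
# (a sketch; "path/to/dump" stands in for --pytorch_dump_folder_path):
#
#     from transformers import Blip2ForConditionalGeneration, Blip2Processor
#
#     processor = Blip2Processor.from_pretrained("path/to/dump")
#     model = Blip2ForConditionalGeneration.from_pretrained("path/to/dump")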
| 308 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because PyTorch Lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load the base longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
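# The converted checkpoint can then be loaded directly (a sketch; the dump path is a
# placeholder, and the tokenizer id mirrors the model identifiers accepted above):
#
#     from transformers import LongformerForQuestionAnswering, LongformerTokenizer
#
#     model = LongformerForQuestionAnswering.from_pretrained("path/to/dump")
#     tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")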
| 308 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
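
# A minimal instantiation sketch (small values chosen only to keep the example cheap):
if __name__ == "__main__":
    config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    print(config.model_type, config.hidden_size)  # megatron-bert 128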
| 95 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = 384
lowerCAmelCase_ = 7
if "tiny" in model_name:
lowerCAmelCase_ = 96
lowerCAmelCase_ = (2, 2, 6, 2)
lowerCAmelCase_ = (3, 6, 12, 24)
elif "small" in model_name:
lowerCAmelCase_ = 96
lowerCAmelCase_ = (2, 2, 18, 2)
lowerCAmelCase_ = (3, 6, 12, 24)
elif "base" in model_name:
lowerCAmelCase_ = 128
lowerCAmelCase_ = (2, 2, 18, 2)
lowerCAmelCase_ = (4, 8, 16, 32)
lowerCAmelCase_ = 12
lowerCAmelCase_ = 512
elif "large" in model_name:
lowerCAmelCase_ = 192
lowerCAmelCase_ = (2, 2, 18, 2)
lowerCAmelCase_ = (6, 12, 24, 48)
lowerCAmelCase_ = 12
lowerCAmelCase_ = 768
# set label information
lowerCAmelCase_ = 150
lowerCAmelCase_ = '''huggingface/label-files'''
lowerCAmelCase_ = '''ade20k-id2label.json'''
lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ = SwinConfig(
embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
lowerCAmelCase_ = UperNetConfig(
backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , )
return config
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = dct.pop(_A )
lowerCAmelCase_ = val
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ = in_proj_weight[:dim, :]
lowerCAmelCase_ = in_proj_bias[: dim]
lowerCAmelCase_ = in_proj_weight[
dim : dim * 2, :
]
lowerCAmelCase_ = in_proj_bias[
dim : dim * 2
]
lowerCAmelCase_ = in_proj_weight[
-dim :, :
]
lowerCAmelCase_ = in_proj_bias[-dim :]
# fmt: on
def __UpperCamelCase ( _A ):
lowerCAmelCase_ , lowerCAmelCase_ = x.shape
lowerCAmelCase_ = x.reshape(_A , 4 , in_channel // 4 )
lowerCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A )
return x
def __UpperCamelCase ( _A ):
lowerCAmelCase_ , lowerCAmelCase_ = x.shape
lowerCAmelCase_ = x.reshape(_A , in_channel // 4 , 4 )
lowerCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A )
return x
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = x.shape[0]
lowerCAmelCase_ = x.reshape(4 , in_channel // 4 )
lowerCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A )
return x
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = x.shape[0]
lowerCAmelCase_ = x.reshape(in_channel // 4 , 4 )
lowerCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A )
return x
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
lowerCAmelCase_ = model_name_to_url[model_name]
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[
'''state_dict'''
]
for name, param in state_dict.items():
print(_A , param.shape )
lowerCAmelCase_ = get_upernet_config(_A )
lowerCAmelCase_ = UperNetForSemanticSegmentation(_A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowerCAmelCase_ = state_dict.pop(_A )
if "bn" in key:
lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' )
lowerCAmelCase_ = val
# rename keys
lowerCAmelCase_ = create_rename_keys(_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_q_k_v(_A , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A )
if "norm" in key:
lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A )
model.load_state_dict(_A )
# verify on image
lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' )
lowerCAmelCase_ = SegformerImageProcessor()
lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
lowerCAmelCase_ = model(_A )
lowerCAmelCase_ = outputs.logits
print(logits.shape )
print('''First values of logits:''' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowerCAmelCase_ = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
lowerCAmelCase_ = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
lowerCAmelCase_ = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
lowerCAmelCase_ = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_A )
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(f"openmmlab/{model_name}" )
processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
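# Once pushed, the converted weights are usable for inference (a sketch; the repo id
# mirrors the push_to_hub target above and is an assumption about what was uploaded):
#
#     from transformers import UperNetForSemanticSegmentation
#
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")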
| 278 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
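
# Usage sketch for the helper above:
#
#     from transformers.dependency_versions_check import dep_version_check
#
#     dep_version_check("tqdm")  # raises if the installed tqdm violates the pin in deps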
| 78 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
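
# A minimal round-trip sketch (the repo id comes from the vocab URL above):
if __name__ == "__main__":
    tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    ids = tokenizer("hello")["input_ids"]
    print([tokenizer._convert_id_to_token(i) for i in ids])  # one token per character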
| 78 | 1 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
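
# Worked examples of the formula above, value * from_ factor * to factor
# (a sketch; outputs are approximate floats):
#
#     volume_conversion(1, "gallon", "litre")      # 1 * 0.00454 * 1000  ~= 4.54
#     volume_conversion(4, "cubicmeter", "litre")  # 4 * 1 * 1000        == 4000.0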
| 302 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
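
# With the lazy structure above, submodule attributes resolve on first access
# (a sketch; the path assumes this file is transformers/models/blip/__init__.py):
#
#     from transformers.models.blip import BlipForConditionalGeneration, BlipProcessor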
| 302 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
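
# A minimal end-to-end sketch of the extractor above:
if __name__ == "__main__":
    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><p>Hello</p></body></html>")
    print(encoding["nodes"])   # [['Hello']]
    print(encoding["xpaths"])  # [['/html/body/p']]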
| 252 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ):
lowercase_ :List[Any] = 1
lowercase_ :List[Any] = 3
lowercase_ :str = (32, 32)
lowercase_ :Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :Tuple = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase_ )
@property
def UpperCamelCase ( self ):
def extract(*UpperCamelCase_ , **UpperCamelCase_ ):
class UpperCamelCase :
'''simple docstring'''
def __init__( self ):
lowercase_ :Dict = torch.ones([0] )
def UpperCamelCase ( self , UpperCamelCase_ ):
self.pixel_values.to(UpperCamelCase_ )
return self
return Out()
return extract
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :List[Any] = self.dummy_cond_unet
lowercase_ :int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
lowercase_ :Any = self.dummy_vae
lowercase_ :Dict = self.dummy_text_encoder
lowercase_ :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase_ :Optional[Any] = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :Union[str, Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :int = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[Any] = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Any = output.images
lowercase_ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :List[Any] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Dict = image[0, -3:, -3:, -1]
lowercase_ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ :List[Any] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :List[str] = self.dummy_cond_unet
lowercase_ :Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Optional[Any] = self.dummy_vae
lowercase_ :List[Any] = self.dummy_text_encoder
lowercase_ :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase_ :Tuple = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :Optional[int] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :Any = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Optional[Any] = output.images
lowercase_ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :List[str] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Dict = image[0, -3:, -3:, -1]
lowercase_ :str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ :Dict = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :List[str] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(pipe.scheduler , UpperCamelCase_ )
assert pipe.safety_checker is None
lowercase_ :Optional[int] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase_ )
lowercase_ :Union[str, Any] = StableDiffusionPipeline.from_pretrained(UpperCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase_ :List[Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.dummy_cond_unet
lowercase_ :Any = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :int = self.dummy_vae
lowercase_ :Tuple = self.dummy_text_encoder
lowercase_ :Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
lowercase_ :Optional[int] = unet.half()
lowercase_ :Union[str, Any] = vae.half()
lowercase_ :Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ :Any = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :Dict = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :List[str] = '''A painting of a squirrel eating a burger'''
lowercase_ :List[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=UpperCamelCase_ )
lowercase_ :Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase_ :List[Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :List[Any] = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
lowercase_ :str = 40_0366_0346
lowercase_ :Optional[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase_ :Tuple = torch.manual_seed(UpperCamelCase_ )
lowercase_ :int = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase_ :List[str] = output.images
lowercase_ :int = image[0, -3:, -3:, -1]
lowercase_ :str = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
lowercase_ :Dict = torch.manual_seed(UpperCamelCase_ )
lowercase_ :Any = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ :int = output.images
lowercase_ :Union[str, Any] = image[0, -3:, -3:, -1]
lowercase_ :Optional[int] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=UpperCamelCase_ )
lowercase_ :List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase_ :Dict = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Optional[int] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
lowercase_ :Any = 27_3497_1755
lowercase_ :str = 7
lowercase_ :Optional[Any] = torch.manual_seed(UpperCamelCase_ )
lowercase_ :Tuple = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase_ :Optional[Any] = output.images
lowercase_ :str = image[0, -3:, -3:, -1]
lowercase_ :int = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase_ :Any = torch.manual_seed(UpperCamelCase_ )
lowercase_ :List[Any] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ :List[str] = output.images
lowercase_ :Optional[Any] = image[0, -3:, -3:, -1]
lowercase_ :Optional[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
lowercase_ :Tuple = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :List[str] = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
lowercase_ :Any = 10_4435_5234
lowercase_ :Union[str, Any] = 12
lowercase_ :str = torch.manual_seed(UpperCamelCase_ )
lowercase_ :str = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase_ :Optional[int] = output.images
lowercase_ :str = image[0, -3:, -3:, -1]
lowercase_ :Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase_ :Dict = torch.manual_seed(UpperCamelCase_ )
lowercase_ :Optional[Any] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ :Optional[Any] = output.images
lowercase_ :List[Any] = image[0, -3:, -3:, -1]
lowercase_ :Any = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
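# The sld_* arguments exercised by these tests map onto the public pipeline like this
# (a sketch; the checkpoint id is the one the tests themselves load, and running it
# requires downloading the full model):
#
#     from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe
#
#     pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
#     image = pipe("a photo of an astronaut", sld_guidance_scale=2000, sld_warmup_steps=7).images[0]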
| 252 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase_ = get_logger(__name__)
class __UpperCamelCase :
"""simple docstring"""
lowerCAmelCase_ = '''dummy_data'''
lowerCAmelCase_ = '''datasets'''
lowerCAmelCase_ = False
def __init__( self : List[Any] , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Dict = dataset_name
__SCREAMING_SNAKE_CASE : Optional[int] = cache_dir
__SCREAMING_SNAKE_CASE : str = use_local_dummy_data
__SCREAMING_SNAKE_CASE : int = config
# download_callbacks take a single url as input
__SCREAMING_SNAKE_CASE : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__SCREAMING_SNAKE_CASE : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__SCREAMING_SNAKE_CASE : Tuple = str(_A )
# to be downloaded
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : List[str] = None
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
if self._dummy_file is None:
__SCREAMING_SNAKE_CASE : int = self.download_dummy_data()
return self._dummy_file
@property
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__SCREAMING_SNAKE_CASE : Optional[Any] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
if self._bucket_url is None:
__SCREAMING_SNAKE_CASE : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def UpperCAmelCase__ ( self : int , _A : Union[str, Any] , *_A : Dict ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__SCREAMING_SNAKE_CASE : str = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def UpperCAmelCase__ ( self : Dict , _A : str , *_A : List[Any] ):
"""simple docstring"""
return self.download_and_extract(_A )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : Tuple , _A : Any ):
"""simple docstring"""
return self.download_and_extract(_A )
def UpperCAmelCase__ ( self : List[Any] , _A : str , *_A : Any , **_A : int ):
"""simple docstring"""
return path
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return {}
def UpperCAmelCase__ ( self : Tuple , _A : str , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
__SCREAMING_SNAKE_CASE : int = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
__SCREAMING_SNAKE_CASE : str = single_urls
__SCREAMING_SNAKE_CASE : int = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
__SCREAMING_SNAKE_CASE : int = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__SCREAMING_SNAKE_CASE : Dict = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCAmelCase__ ( self : Any , _A : Dict , _A : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__SCREAMING_SNAKE_CASE : int = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
__SCREAMING_SNAKE_CASE : int = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__SCREAMING_SNAKE_CASE : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__SCREAMING_SNAKE_CASE : Any = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def UpperCAmelCase__ ( self : List[Any] , _A : List[str] , _A : Optional[int] ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : str , _A : Dict ):
"""simple docstring"""
def _iter_archive_members(_A : List[Any] ):
# this preserves the order of the members inside the ZIP archive
__SCREAMING_SNAKE_CASE : int = Path(self.dummy_file ).parent
__SCREAMING_SNAKE_CASE : Any = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__SCREAMING_SNAKE_CASE : Optional[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(_A )
__SCREAMING_SNAKE_CASE : Tuple = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def UpperCAmelCase__ ( self : List[Any] , _A : str ):
"""simple docstring"""
if not isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Tuple = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
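# Two naming rules used by the manager above, shown in isolation: dummy file
# names are the URL-quoted last path segment of the source URL, and sharded
# URLs such as `data.txt-000001-of-00300` are detected with the
# `[0-9]{3,}-of-[0-9]{3,}` pattern so that only the first shard is kept.
import re
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.json?split=1"  # illustrative URL
assert urllib.parse.quote_plus(Path(url).name) == "train.json%3Fsplit%3D1"

shards = ["data.txt-000001-of-00300", "data.txt-000002-of-00300"]
assert all(re.findall(r"[0-9]{3,}-of-[0-9]{3,}", s) for s in shards)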
| 303 |
import sys
from collections import defaultdict
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = []
def UpperCAmelCase__ ( self : List[str] , _A : str ):
"""simple docstring"""
return self.node_position[vertex]
def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = pos
def UpperCAmelCase__ ( self : List[Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] , _A : Union[str, Any] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE : List[Any] = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE : Dict = 2 * start + 2
if heap[smallest_child] < heap[start]:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = heap[smallest_child], positions[smallest_child]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = (
heap[start],
positions[start],
)
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = temp, tempa
__SCREAMING_SNAKE_CASE : Any = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCAmelCase__ ( self : Any , _A : Union[str, Any] , _A : Dict , _A : Optional[Any] , _A : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE : Optional[Any] = heap[parent]
__SCREAMING_SNAKE_CASE : str = position[parent]
self.set_position(position[parent] , _A )
else:
__SCREAMING_SNAKE_CASE : List[str] = val
__SCREAMING_SNAKE_CASE : List[str] = temp
self.set_position(_A , _A )
break
__SCREAMING_SNAKE_CASE : List[Any] = parent
else:
__SCREAMING_SNAKE_CASE : Tuple = val
__SCREAMING_SNAKE_CASE : List[str] = temp
self.set_position(_A , 0 )
def UpperCAmelCase__ ( self : List[str] , _A : Tuple , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCAmelCase__ ( self : List[str] , _A : Dict , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = positions[0]
__SCREAMING_SNAKE_CASE : Tuple = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = Heap()
__SCREAMING_SNAKE_CASE : int = [0] * len(snake_case )
__SCREAMING_SNAKE_CASE : Dict = [-1] * len(snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__SCREAMING_SNAKE_CASE : Dict = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE : Optional[int] = []
for vertex in range(len(snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case )
heap.node_position.append(snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : str = 1
__SCREAMING_SNAKE_CASE : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : Dict = distance
heap.heapify(snake_case , snake_case )
for _ in range(1 , len(snake_case ) ):
__SCREAMING_SNAKE_CASE : Tuple = heap.delete_minimum(snake_case , snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE : List[Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case )]
):
__SCREAMING_SNAKE_CASE : int = distance
heap.bottom_to_top(
snake_case , heap.get_position(snake_case ) , snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Any = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
lowercase_ = int(input("""Enter number of edges: """).strip())
lowercase_ = defaultdict(list)
for _ in range(edges_number):
lowercase_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
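# A compact reference version of Prim's algorithm using the standard-library
# heap, for comparison with the hand-rolled Heap class above. It assumes the
# same input shape: vertices numbered 0..n-1 and
# adjacency_list[u] = [[v, weight], ...].
import heapq

def prim_mst(adjacency_list):
    num_vertices = len(adjacency_list)
    visited = {0}
    candidates = [(weight, 0, v) for v, weight in adjacency_list[0]]
    heapq.heapify(candidates)
    tree_edges = []  # (parent, vertex) pairs, as in the function above
    while candidates and len(visited) < num_vertices:
        weight, u, v = heapq.heappop(candidates)
        if v in visited:
            continue  # a cheaper edge already connected v
        visited.add(v)
        tree_edges.append((u, v))
        for nxt, w in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(candidates, (w, v, nxt))
    return tree_edges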
| 303 | 1 |
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int ) -> list[int]:
    if UpperCAmelCase__ <= 0 or not isinstance(UpperCAmelCase__ , int ):
raise ValueError("""Length must be a positive integer.""" )
return [n * (2 * n - 1) for n in range(UpperCAmelCase__ )]
if __name__ == "__main__":
    print(lowerCamelCase(5))
    print(lowerCamelCase(10))
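# Hexagonal numbers satisfy h(n) = n * (2n - 1): 0, 1, 6, 15, 28, 45, ...
# Quick sanity check of the closed form used above:
assert [n * (2 * n - 1) for n in range(6)] == [0, 1, 6, 15, 28, 45]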
| 350 | '''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
# Initialise PyTorch model
lowercase_ : List[str] = FunnelConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase_ : Dict = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
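# Example invocation of the conversion script above (the script file name and
# all paths below are placeholders, not real artifacts):
#
#   python convert_funnel_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin \
#       --base_model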
| 21 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__a = logging.get_logger(__name__)
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = ['''input_features''', '''attention_mask''']
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Dict=8_0 , lowerCAmelCase__ : List[str]=1_6_0_0_0 , lowerCAmelCase__ : Optional[int]=8_0 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Union[str, Any]=True , **lowerCAmelCase__ : int , ) -> Dict:
"""simple docstring"""
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Any = num_mel_bins
_UpperCAmelCase : Any = do_ceptral_normalize
_UpperCAmelCase : Union[str, Any] = normalize_means
_UpperCAmelCase : int = normalize_vars
_UpperCAmelCase : Any = True
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : str , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase : List[Any] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
_UpperCAmelCase : List[str] = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 )
_UpperCAmelCase : Any = ta_kaldi.fbank(lowerCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _lowerCAmelCase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any = True , lowerCAmelCase__ : Any = True , lowerCAmelCase__ : Any = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
_UpperCAmelCase : Union[str, Any] = x[:input_length].mean(axis=0 )
_UpperCAmelCase : Optional[int] = np.subtract(lowerCAmelCase__ , lowerCAmelCase__ )
if normalize_vars:
_UpperCAmelCase : Dict = x[:input_length].std(axis=0 )
_UpperCAmelCase : int = np.divide(lowerCAmelCase__ , lowerCAmelCase__ )
if input_length < x.shape[0]:
_UpperCAmelCase : Optional[int] = padding_value
# make sure array is in float32
_UpperCAmelCase : Any = x.astype(np.floataa )
return x
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] = None ) -> List[np.ndarray]:
"""simple docstring"""
_UpperCAmelCase : int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCAmelCase__ , lowerCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
def __call__( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] = False , lowerCAmelCase__ : Dict = None , lowerCAmelCase__ : Union[str, Any] = False , lowerCAmelCase__ : Tuple = None , lowerCAmelCase__ : Union[str, Any] = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : Tuple = None , **lowerCAmelCase__ : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : str = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : str = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : int = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
_UpperCAmelCase : Union[str, Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : Optional[int] = [raw_speech]
# extract fbank features
_UpperCAmelCase : str = [self._extract_fbank_features(lowerCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase : Dict = BatchFeature({"input_features": features} )
_UpperCAmelCase : List[Any] = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
# make sure list is in array format
_UpperCAmelCase : str = padded_inputs.get("input_features" )
if isinstance(input_features[0] , lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_features]
_UpperCAmelCase : Tuple = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_UpperCAmelCase : Optional[int] = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_UpperCAmelCase : List[Any] = (
np.array(lowerCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_UpperCAmelCase : Tuple = self.normalize(
padded_inputs["input_features"] , attention_mask=lowerCAmelCase__ )
if return_tensors is not None:
_UpperCAmelCase : Optional[Any] = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
        return padded_inputs
| 145 |
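# A minimal NumPy sketch of the utterance-level CMVN step implemented by the
# feature extractor above: subtract the per-bin mean and divide by the per-bin
# standard deviation computed over the valid (unpadded) frames, then overwrite
# padded frames with the padding value.
import numpy as np

def utterance_cmvn(x, input_length, padding_value=0.0):
    x = x - x[:input_length].mean(axis=0)
    x = x / x[:input_length].std(axis=0)
    if input_length < x.shape[0]:
        x[input_length:] = padding_value
    return x.astype(np.float32)

features = np.random.randn(10, 80).astype(np.float32)  # 10 frames, 80 mel bins
normalized = utterance_cmvn(features, input_length=8)
assert np.allclose(normalized[:8].mean(axis=0), 0.0, atol=1e-4)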
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''wavlm'''
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
@property
def __A (self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
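    # The property above computes the overall time-downsampling factor of the
    # convolutional feature encoder as the product of the conv strides. With
    # the default strides (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320, i.e.
    # one feature frame per 320 waveform samples (20 ms of audio at 16 kHz):
    #
    #   import functools, operator
    #   assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320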
| 5 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self : Union[str, Any] , A : List[str] , A : List[str]=1_3 , A : Dict=3_2 , A : Dict=2 , A : List[Any]=3 , A : List[str]=1_6 , A : Any=[1, 2, 1] , A : Optional[Any]=[2, 2, 4] , A : Dict=2 , A : Optional[int]=2.0 , A : List[str]=True , A : List[str]=0.0 , A : str=0.0 , A : Optional[Any]=0.1 , A : Optional[int]="gelu" , A : Tuple=False , A : Optional[int]=True , A : Union[str, Any]=0.02 , A : List[str]=1e-5 , A : int=True , A : Union[str, Any]=None , A : List[str]=True , A : Any=1_0 , A : Optional[Any]=8 , ):
_UpperCAmelCase : Any = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : List[Any] = image_size
_UpperCAmelCase : Union[str, Any] = patch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : str = embed_dim
_UpperCAmelCase : str = depths
_UpperCAmelCase : int = num_heads
_UpperCAmelCase : Any = window_size
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : Any = qkv_bias
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : str = drop_path_rate
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : Any = use_absolute_embeddings
_UpperCAmelCase : List[Any] = patch_norm
_UpperCAmelCase : List[str] = layer_norm_eps
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : str = is_training
_UpperCAmelCase : str = scope
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : List[str] = type_sequence_label_size
_UpperCAmelCase : Tuple = encoder_stride
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self : Optional[int] ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def snake_case_ ( self : List[str] , A : Optional[Any] , A : Tuple , A : Tuple ):
_UpperCAmelCase : List[Any] = SwinvaModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A )
_UpperCAmelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case_ ( self : List[str] , A : Dict , A : Dict , A : Optional[Any] ):
_UpperCAmelCase : Tuple = SwinvaForMaskedImageModeling(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase : int = 1
_UpperCAmelCase : int = SwinvaForMaskedImageModeling(A )
model.to(A )
model.eval()
_UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : Dict = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case_ ( self : Tuple , A : List[Any] , A : Union[str, Any] , A : str ):
_UpperCAmelCase : int = self.type_sequence_label_size
_UpperCAmelCase : Dict = SwinvaForImageClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = config_and_inputs
_UpperCAmelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : List[Any] = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Dict = False
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Dict = SwinvaModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=A , embed_dim=3_7 )
def snake_case_ ( self : Any ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self : Any ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def snake_case_ ( self : str ):
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def snake_case_ ( self : Union[str, Any] ):
pass
def snake_case_ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def snake_case_ ( self : Tuple ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = model_class(A )
_UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
_UpperCAmelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def snake_case_ ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
_UpperCAmelCase : Any = True
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Dict = True
_UpperCAmelCase : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**self._prepare_for_class(A , A ) )
_UpperCAmelCase : Any = outputs.attentions
_UpperCAmelCase : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(A ) , A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Optional[int] = config.window_size**2
_UpperCAmelCase : int = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : int = model(**self._prepare_for_class(A , A ) )
_UpperCAmelCase : Dict = outputs.attentions
self.assertEqual(len(A ) , A )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCAmelCase : Dict = len(A )
# Check attention is always last and order is fine
_UpperCAmelCase : Dict = True
_UpperCAmelCase : int = True
_UpperCAmelCase : Tuple = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(A , A ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
_UpperCAmelCase : Union[str, Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCAmelCase : int = 2
self.assertEqual(out_len + added_hidden_states , len(A ) )
_UpperCAmelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(A ) , A )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def snake_case_ ( self : str , A : List[str] , A : Tuple , A : int , A : int ):
_UpperCAmelCase : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Any = model(**self._prepare_for_class(A , A ) )
_UpperCAmelCase : Dict = outputs.hidden_states
_UpperCAmelCase : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A ) , A )
# Swinv2 has a different seq_length
_UpperCAmelCase : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(A ) , A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = reshaped_hidden_states[0].shape
_UpperCAmelCase : List[str] = (
reshaped_hidden_states[0].view(A , A , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case_ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase : int = True
self.check_hidden_states_output(A , A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[Any] = True
self.check_hidden_states_output(A , A , A , A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = 3
_UpperCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : List[Any] = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def snake_case_ ( self : Any ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = SwinvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def snake_case_ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = _config_zero_init(A )
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class(config=A )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self : str ):
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self : Any ):
_UpperCAmelCase : Any = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
A )
_UpperCAmelCase : Optional[int] = self.default_image_processor
_UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase : Union[str, Any] = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**A )
# verify the logits
_UpperCAmelCase : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
_UpperCAmelCase : List[Any] = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
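# A standalone sketch of the slow integration test above, written against the
# public transformers API (upstream class names; the checkpoint is downloaded
# on first use, and the image path is the fixture referenced in the test):
import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
print(model.config.id2label[logits.argmax(-1).item()])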
| 202 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowerCAmelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : int = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase : List[Any] = {
"google/electra-small-generator": 5_12,
"google/electra-base-generator": 5_12,
"google/electra-large-generator": 5_12,
"google/electra-small-discriminator": 5_12,
"google/electra-base-discriminator": 5_12,
"google/electra-large-discriminator": 5_12,
}
_lowerCAmelCase : Optional[Any] = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Tuple = ElectraTokenizer
def __init__( self : Dict , A : Dict=None , A : Optional[int]=None , A : Dict=True , A : Optional[Any]="[UNK]" , A : Any="[SEP]" , A : str="[PAD]" , A : Tuple="[CLS]" , A : Optional[Any]="[MASK]" , A : Any=True , A : Tuple=None , **A : Any , ):
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
_UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , A ) != do_lower_case
or normalizer_state.get("strip_accents" , A ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , A ) != tokenize_chinese_chars
):
_UpperCAmelCase : Union[str, Any] = getattr(A , normalizer_state.pop("type" ) )
_UpperCAmelCase : Dict = do_lower_case
_UpperCAmelCase : Optional[int] = strip_accents
_UpperCAmelCase : Any = tokenize_chinese_chars
_UpperCAmelCase : Optional[Any] = normalizer_class(**A )
_UpperCAmelCase : int = do_lower_case
def snake_case_ ( self : Tuple , A : str , A : int=None ):
_UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
_UpperCAmelCase : Any = [self.sep_token_id]
_UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self : Any , A : str , A : Optional[str] = None ):
_UpperCAmelCase : List[Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
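# Sketch of the special-token layout the two methods above implement:
#   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0 for the first segment
#                    (including both its special tokens), 1 afterwards
# Shown with the upstream fast tokenizer (downloads the vocab on first use):
from transformers import ElectraTokenizerFast

tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
enc = tok("first sentence", "second sentence")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])  # 0s up to and including the first [SEP], then 1s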
| 202 | 1 |
from __future__ import annotations
class a__ :
"""simple docstring"""
def __init__( self , lowercase ) -> None:
'''simple docstring'''
A__ = data
A__ = None
A__ = None
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowerCAmelCase__ ( ) -> None: # Main function for testing.
'''simple docstring'''
A__ = Node(1 )
A__ = Node(2 )
A__ = Node(3 )
A__ = Node(4 )
A__ = Node(5 )
A__ = Node(6 )
A__ = Node(7 )
A__ = Node(8 )
A__ = Node(9 )
print(is_full_binary_tree(SCREAMING_SNAKE_CASE_ ) )
print(depth_of_tree(SCREAMING_SNAKE_CASE_ ) )
print("Tree is: " )
display(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
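# A full binary tree is one in which every node has either zero or two
# children, which is exactly the recursion above. Self-contained sketch (the
# node class and the check are re-declared here so the example runs on its own):
class TreeNode:
    def __init__(self, data):
        self.data, self.left, self.right = data, None, None

def is_full(tree):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full(tree.left) and is_full(tree.right)
    return not tree.left and not tree.right

root = TreeNode(1)
root.left = TreeNode(2)   # lone left child -> not a full binary tree
assert not is_full(root)
root.right = TreeNode(3)  # both children present -> full again
assert is_full(root)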
| 68 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_( snake_case : Tuple ):
'''simple docstring'''
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = 0.01
with locka.acquire():
with pytest.raises(snake_case ):
snake_case_ = time.time()
locka.acquire(snake_case )
assert time.time() - _start > timeout
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = "a" * 1_0_0_0 + ".lock"
snake_case_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(snake_case )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
snake_case_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case ):
locka.acquire(0 )
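# Minimal usage sketch of the FileLock API exercised by the tests above (the
# standalone `filelock` package that `datasets` vendors exposes the same
# interface: acquiring raises Timeout if the lock cannot be taken in time):
from filelock import FileLock, Timeout

lock = FileLock("resource.lock")
try:
    with lock.acquire(timeout=0.5):
        pass  # critical section; the lock is released when the block exits
except Timeout:
    print("another process is holding resource.lock")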
| 85 | 0 |
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = len(set_a.intersection(lowerCAmelCase ) )
if alternative_union:
_lowerCAmelCase = len(lowerCAmelCase ) + len(lowerCAmelCase )
else:
_lowerCAmelCase = len(set_a.union(lowerCAmelCase ) )
return intersection / union
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(lowerCAmelCase , (list, tuple) ):
_lowerCAmelCase = [element for element in set_a if element in set_b]
if alternative_union:
_lowerCAmelCase = len(lowerCAmelCase ) + len(lowerCAmelCase )
return len(lowerCAmelCase ) / union
else:
_lowerCAmelCase = set_a + [element for element in set_b if element not in set_a]
return len(lowerCAmelCase ) / len(lowerCAmelCase )
return len(lowerCAmelCase ) / len(lowerCAmelCase )
return None
if __name__ == "__main__":
A__ : List[Any] ={'''a''', '''b''', '''c''', '''d''', '''e'''}
A__ : Tuple ={'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
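# A clean reference version of the similarity above for the set case:
# J(A, B) = |A intersect B| / |A union B|, and with alternative_union the
# denominator becomes |A| + |B| instead.
def jaccard(set_a, set_b, alternative_union=False):
    set_a, set_b = set(set_a), set(set_b)
    intersection = len(set_a & set_b)
    union = len(set_a) + len(set_b) if alternative_union else len(set_a | set_b)
    return intersection / union

assert jaccard({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 3 / 8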
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
A__ : int ={'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict =['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
A__ : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 220 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
snake_case : str = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ (lowerCamelCase_ ):
def __init__( self :List[str] ,**__snake_case :Dict ) -> List[str]:
super().__init__(**__snake_case )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self ,'vision' )
self.check_model_type(__snake_case )
def __call__( self :str ,__snake_case :Union[str, "Image.Image", List[Dict[str, Any]]] ,__snake_case :Union[str, List[str]] = None ,**__snake_case :Dict ,) -> Optional[int]:
if "text_queries" in kwargs:
a__ = kwargs.pop('text_queries' )
if isinstance(__snake_case ,(str, Image.Image) ):
a__ = {'image': image, 'candidate_labels': candidate_labels}
else:
a__ = image
a__ = super().__call__(__snake_case ,**__snake_case )
return results
def lowerCamelCase__( self :int ,**__snake_case :List[Any] ) -> str:
a__ = {}
if "threshold" in kwargs:
a__ = kwargs['threshold']
if "top_k" in kwargs:
a__ = kwargs['top_k']
return {}, {}, postprocess_params
def lowerCamelCase__( self :Tuple ,__snake_case :List[Any] ) -> Optional[int]:
a__ = load_image(inputs['image'] )
a__ = inputs['candidate_labels']
if isinstance(__snake_case ,__snake_case ):
a__ = candidate_labels.split(',' )
a__ = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
for i, candidate_label in enumerate(__snake_case ):
a__ = self.tokenizer(__snake_case ,return_tensors=self.framework )
a__ = self.image_processor(__snake_case ,return_tensors=self.framework )
yield {
"is_last": i == len(__snake_case ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCamelCase__( self :Tuple ,__snake_case :Optional[Any] ) -> List[str]:
a__ = model_inputs.pop('target_size' )
a__ = model_inputs.pop('candidate_label' )
a__ = model_inputs.pop('is_last' )
a__ = self.model(**__snake_case )
a__ = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def lowerCamelCase__( self :List[str] ,__snake_case :Any ,__snake_case :Tuple=0.1 ,__snake_case :int=None ) -> Optional[Any]:
a__ = []
for model_output in model_outputs:
a__ = model_output['candidate_label']
a__ = BaseModelOutput(__snake_case )
a__ = self.image_processor.post_process_object_detection(
outputs=__snake_case ,threshold=__snake_case ,target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
a__ = outputs['scores'][index].item()
a__ = self._get_bounding_box(outputs['boxes'][index][0] )
a__ = {'score': score, 'label': label, 'box': box}
results.append(__snake_case )
a__ = sorted(__snake_case ,key=lambda __snake_case : x["score"] ,reverse=__snake_case )
if top_k:
a__ = results[:top_k]
return results
def lowerCamelCase__( self :Optional[int] ,__snake_case :"torch.Tensor" ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
a__ , a__ , a__ , a__ = box.int().tolist()
a__ = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
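# Usage sketch of the pipeline above through the public transformers entry
# point (OWL-ViT is the usual backbone checkpoint for this task; running this
# requires network access):
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for result in results:
    # each entry is {"score": float, "label": str, "box": {xmin, ymin, xmax, ymax}},
    # matching the postprocess output built above
    print(result["label"], round(result["score"], 3), result["box"])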
| 240 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ (lowerCamelCase_ ):
@staticmethod
@abstractmethod
def lowerCamelCase__( __snake_case :ArgumentParser ) -> Dict:
raise NotImplementedError()
@abstractmethod
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
raise NotImplementedError()
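# Hypothetical sketch of how a concrete command plugs into the abstract base
# above: register_subcommand wires the command into argparse, run executes it
# (the EnvCommand name and its behavior here are illustrative only):
from argparse import ArgumentParser

class EnvCommand:  # in practice this would subclass the abstract base above
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("env", help="print environment info")
        sub.set_defaults(func=lambda args: EnvCommand().run())

    def run(self):
        print("environment info goes here")

parser = ArgumentParser("cli")
EnvCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["env"])
args.func(args)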
| 240 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=2_4 , __lowerCamelCase=2 , __lowerCamelCase=6 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=None , __lowerCamelCase=1_0_0_0 , ) -> int:
_SCREAMING_SNAKE_CASE : Union[str, Any] = parent
_SCREAMING_SNAKE_CASE : Dict = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : Optional[Any] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_input_mask
_SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids
_SCREAMING_SNAKE_CASE : str = use_labels
_SCREAMING_SNAKE_CASE : List[str] = vocab_size
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : Dict = num_attention_heads
_SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : List[str] = hidden_act
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
_SCREAMING_SNAKE_CASE : Any = type_vocab_size
_SCREAMING_SNAKE_CASE : int = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Tuple = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = num_labels
_SCREAMING_SNAKE_CASE : Tuple = scope
_SCREAMING_SNAKE_CASE : str = range_bbox
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_SCREAMING_SNAKE_CASE : List[Any] = bbox[i, j, 3]
_SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[i, j, 1]
_SCREAMING_SNAKE_CASE : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 2]
_SCREAMING_SNAKE_CASE : Dict = bbox[i, j, 0]
_SCREAMING_SNAKE_CASE : Dict = t
_SCREAMING_SNAKE_CASE : Tuple = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : int = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = LiltModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_SCREAMING_SNAKE_CASE : int = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : Optional[Any] = LiltForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_SCREAMING_SNAKE_CASE : Any = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = LiltForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_SCREAMING_SNAKE_CASE : Optional[Any] = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__snake_case = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
return True
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Dict = LiltModelTester(self )
_SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE : Dict = type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = LiltModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
@slow
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[1, 2]] , device=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : str = torch.Size([1, 2, 7_6_8] )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_SCREAMING_SNAKE_CASE , )
        self.assertEqual(outputs.last_hidden_state.shape , _SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
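# Hedged standalone sketch (not part of the test class): the same LiLT forward pass as the
# integration test above, written as a plain script; requires downloading the checkpoint.
import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").eval()
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768]), per the expected shape above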
| 364 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock ):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once() | 325 | 0 |
"""simple docstring"""
class CircularQueue :
    def __init__( self , n : int):
        """simple docstring"""
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self ):
        """simple docstring"""
        return self.size
    def is_empty( self ):
        """simple docstring"""
        return self.size == 0
    def first( self ):
        """simple docstring"""
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data : int):
        """simple docstring"""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        """simple docstring"""
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
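# Hedged usage sketch (not part of the original snippet; names are illustrative):
# a quick smoke test of the fixed-capacity CircularQueue defined above.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20)  # enqueue returns self, so calls chain
    assert len(queue) == 2 and queue.first() == 10  # first() peeks without removing
    assert queue.dequeue() == 10 and len(queue) == 1  # dequeue pops from the front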
| 136 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """Speech2TextFeatureExtractor"""
lowerCAmelCase_ = """Speech2TextTokenizer"""
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = self.feature_extractor
lowerCamelCase__ = False
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowerCamelCase__ = kwargs.pop('''raw_speech''' )
else:
lowerCamelCase__ = kwargs.pop('''audio''' , __lowerCAmelCase )
lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
lowerCamelCase__ = kwargs.pop('''text''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
lowerCamelCase__ = args[0]
lowerCamelCase__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowerCamelCase__ = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ = encodings['''input_ids''']
return inputs
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def __lowerCamelCase ( self ):
'''simple docstring'''
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowerCamelCase__ = True
lowerCamelCase__ = self.tokenizer
yield
lowerCamelCase__ = self.feature_extractor
lowerCamelCase__ = False
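# Hedged usage sketch (assumptions: the checkpoint name and the dummy waveform are
# illustrative, not taken from the snippet). It exercises the __call__ routing above:
# `audio` goes to the feature extractor, `text` to the tokenizer, and passing both
# attaches the tokenized ids to the audio features as labels.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence, illustrative only
inputs = processor(audio=speech, sampling_rate=16_000, text="hello", return_tensors="pt")
print(sorted(inputs.keys()))  # expect the audio features plus a "labels" entry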
| 209 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Tuple = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = "umt5"
A : Dict = ["past_key_values"]
def __init__( self : List[str] , UpperCAmelCase_ : Optional[int]=2_5_0_1_1_2 , UpperCAmelCase_ : str=5_1_2 , UpperCAmelCase_ : Tuple=6_4 , UpperCAmelCase_ : Optional[Any]=1_0_2_4 , UpperCAmelCase_ : Tuple=8 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=6 , UpperCAmelCase_ : str=3_2 , UpperCAmelCase_ : Union[str, Any]=1_2_8 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : List[Any]="gated-gelu" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]="T5Tokenizer" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : str=0 , **UpperCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=UpperCAmelCase_ , tokenizer_class=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : Union[str, Any] = vocab_size
a : Optional[int] = d_model
a : Any = d_kv
a : Optional[Any] = d_ff
a : str = num_layers
a : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a : Optional[int] = num_heads
a : int = relative_attention_num_buckets
a : Optional[int] = relative_attention_max_distance
a : List[str] = dropout_rate
a : List[Any] = layer_norm_epsilon
a : Optional[int] = initializer_factor
a : Union[str, Any] = feed_forward_proj
a : Optional[Any] = use_cache
a : Dict = self.feed_forward_proj.split('-')
a : Tuple = act_info[-1]
a : str = act_info[0] == 'gated'
if len(UpperCAmelCase_) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
a : List[str] = 'gelu_new'
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return self.d_model
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return self.num_heads
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.num_layers
class UpperCamelCase ( a_ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
a : List[str] = 'past_encoder_sequence + sequence'
a : Tuple = {0: 'batch'}
a : str = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a : Union[str, Any] = {0: 'batch', 1: 'decoder_sequence'}
a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return 1_3
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return 5e-4
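# Hedged usage sketch: assuming the config class above is the one transformers exports
# as UMT5Config, this shows the `feed_forward_proj` parsing done in __init__, where the
# "gated-" prefix sets a flag and "gated-gelu" is special-cased to the "gelu_new" activation.
from transformers import UMT5Config

config = UMT5Config(feed_forward_proj="gated-gelu")
print(config.is_gated_act)  # True, from the "gated-" prefix
print(config.dense_act_fn)  # "gelu_new", via the special case above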
| 345 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = '''philschmid/bart-large-cnn-samsum'''
UpperCamelCase_ : List[str] = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
UpperCamelCase_ : Optional[Any] = '''summarizer'''
UpperCamelCase_ : Optional[int] = AutoTokenizer
UpperCamelCase_ : List[str] = AutoModelForSeqaSeqLM
UpperCamelCase_ : Dict = ['''text''']
UpperCamelCase_ : Union[str, Any] = ['''text''']
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.pre_processor(lowerCAmelCase__ , return_tensors="pt" , truncation=lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : int ) -> str:
"""simple docstring"""
return self.model.generate(**lowerCAmelCase__ )[0]
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : str ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) | 145 | '''simple docstring'''
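# Hedged usage sketch for the summarization tool defined above: assuming it is the
# TextSummarizationTool shipped in transformers' tools module (the class name in the
# snippet is mangled), calling the tool chains encode -> forward -> decode and downloads
# the philschmid/bart-large-cnn-samsum checkpoint on first use.
from transformers.tools.text_summarization import TextSummarizationTool

tool = TextSummarizationTool()
print(tool("Philipp: Hi, can you summarize this chat for me? Anna: Sure, one moment."))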
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = '''vit_msn'''
def __init__( self : Optional[int] , lowerCAmelCase__ : str=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : Optional[Any]=3_0_7_2 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : int=1e-06 , lowerCAmelCase__ : Union[str, Any]=2_2_4 , lowerCAmelCase__ : Optional[int]=1_6 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : str=True , **lowerCAmelCase__ : Optional[Any] , ) -> int:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Tuple = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Tuple = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Optional[int] = qkv_bias | 145 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self :Optional[Any] ,_UpperCamelCase :Dict ,_UpperCamelCase :Any=7 ,_UpperCamelCase :Dict=3 ,_UpperCamelCase :Tuple=1_8 ,_UpperCamelCase :str=3_0 ,_UpperCamelCase :Optional[Any]=4_0_0 ,_UpperCamelCase :int=True ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :List[Any]=False ,_UpperCamelCase :int=True ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=[0.5, 0.5, 0.5] ,_UpperCamelCase :Union[str, Any]=[0.5, 0.5, 0.5] ,):
snake_case_ : int = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : Tuple = image_size
snake_case_ : Dict = min_resolution
snake_case_ : Optional[int] = max_resolution
snake_case_ : List[str] = do_resize
snake_case_ : Union[str, Any] = size if size is not None else {"""height""": 1_8, """width""": 2_0}
snake_case_ : List[Any] = do_thumbnail
snake_case_ : List[str] = do_align_axis
snake_case_ : str = do_pad
snake_case_ : Optional[int] = do_normalize
snake_case_ : Dict = image_mean
snake_case_ : List[Any] = image_std
def a__ ( self :Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
lowercase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def a__ ( self :int ):
snake_case_ : Tuple = DonutImageProcessingTester(self )
@property
def a__ ( self :List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self :Any ):
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""size""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""do_thumbnail""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""do_align_long_axis""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""do_pad""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"""image_std""" ) )
def a__ ( self :str ):
snake_case_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 2_0} )
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 )
self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} )
# Previous config had dimensions in (width, height) order
snake_case_ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=(4_2, 8_4) )
self.assertEqual(image_processor.size ,{"""height""": 8_4, """width""": 4_2} )
def a__ ( self :Any ):
pass
@is_flaky()
def a__ ( self :Any ):
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
snake_case_ : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : Optional[Any] = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
@is_flaky()
def a__ ( self :List[Any] ):
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE ,numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : List[str] = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
@is_flaky()
def a__ ( self :int ):
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE ,torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : str = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,) | 351 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc :Callable[[int | float], int | float] , x_start :int | float , x_end :int | float , steps :int = 1_00 , ):
    '''simple docstring'''
    xa = x_start
    fxa = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb )
        area += abs(fxb + fxa ) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area
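# Hedged sanity check (illustrative): the exact integral of x**2 over [0, 3] is 9, and the
# composite trapezoidal rule above converges toward it as `steps` grows.
assert abs(trapezoidal_area(lambda x: x * x, 0, 3, 1_000) - 9.0) < 1e-3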
if __name__ == "__main__":
    def f( x ):
        '''simple docstring'''
        return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
while i <= 100_000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10 | 8 | 0 |
'''simple docstring'''
def text_justification( word : str , max_width : int ):
    words = word.split()
    def justify( line : list , width : int , max_width : int ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " " )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line, width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(" ".join(line ) + (remaining_spaces + 1) * " " )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
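    # Hedged worked example (illustrative input): fully justify a short sentence to width 16.
    print(text_justification('This is an example of text justification.', 16))
    # -> ['This    is    an', 'example  of text', 'justification.  ']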
| 237 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """char"""
__lowercase = """bpe"""
__lowercase = """wp"""
__lowerCAmelCase : Union[str, Any] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = ["""image_processor""", """char_tokenizer"""]
__lowercase = """ViTImageProcessor"""
__lowercase = """MgpstrTokenizer"""
def __init__( self :int , lowercase_ :int=None , lowercase_ :List[str]=None , **lowercase_ :List[Any] )-> Optional[Any]:
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
A__ = kwargs.pop("feature_extractor" )
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
A__ = tokenizer
A__ = AutoTokenizer.from_pretrained("gpt2" )
A__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(lowercase_ , lowercase_ )
def __call__( self :Optional[Any] , lowercase_ :Any=None , lowercase_ :Tuple=None , lowercase_ :List[str]=None , **lowercase_ :Union[str, Any] )-> Optional[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
A__ = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None:
A__ = self.char_tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
A__ = encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self :List[str] , lowercase_ :int )-> int:
A__, A__, A__ = sequences
A__ = char_preds.size(0 )
A__, A__ = self._decode_helper(lowercase_ , "char" )
A__, A__ = self._decode_helper(lowercase_ , "bpe" )
A__, A__ = self._decode_helper(lowercase_ , "wp" )
A__ = []
A__ = []
for i in range(lowercase_ ):
A__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
A__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
A__ = scores.index(max(lowercase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A__ = {}
A__ = final_strs
A__ = final_scores
A__ = char_strs
A__ = bpe_strs
A__ = wp_strs
return out
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] , lowercase_ :str )-> Optional[Any]:
if format == DecodeType.CHARACTER:
A__ = self.char_decode
A__ = 1
A__ = "[s]"
elif format == DecodeType.BPE:
A__ = self.bpe_decode
A__ = 2
A__ = "#"
elif format == DecodeType.WORDPIECE:
A__ = self.wp_decode
A__ = 1_02
A__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
A__, A__ = [], []
A__ = pred_logits.size(0 )
A__ = pred_logits.size(1 )
A__, A__ = pred_logits.topk(1 , dim=-1 , largest=lowercase_ , sorted=lowercase_ )
A__ = preds_index.view(-1 , lowercase_ )[:, 1:]
A__ = decoder(lowercase_ )
A__, A__ = torch.nn.functional.softmax(lowercase_ , dim=2 ).max(dim=2 )
A__ = preds_max_prob[:, 1:]
for index in range(lowercase_ ):
A__ = preds_str[index].find(lowercase_ )
A__ = preds_str[index][:pred_eos]
A__ = preds_index[index].cpu().tolist()
A__ = pred_index.index(lowercase_ ) if eos_token in pred_index else -1
A__ = preds_max_prob[index][: pred_eos_index + 1]
A__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowercase_ )
conf_scores.append(lowercase_ )
return dec_strs, conf_scores
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[Any] )-> int:
A__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowercase_ )]
return decode_strs
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Optional[Any] )-> List[str]:
return self.bpe_tokenizer.batch_decode(lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :List[str] )-> Union[str, Any]:
A__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowercase_ )]
return decode_strs
| 237 | 1 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration( electron_conc , hole_conc , intrinsic_conc , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
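    # Hedged worked example (illustrative numbers): by mass action, n * p = n_i**2, so with
    # hole_conc=100 and intrinsic_conc=50 the electron concentration is 50**2 / 100 = 25.
    assert carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=50) == ("electron_conc", 25.0)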
| 363 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_whisper_fast'''] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_whisper'''] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_whisper'''] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_whisper'''] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case_:
def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : int=1_6 , UpperCamelCase_ : str=3 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[Any]=3_2 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Tuple=[0, 1, 2, 3] , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Union[str, Any]=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Any=[1, 3_8_4, 2_4, 2_4] , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Tuple=None , ):
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : Dict = batch_size
lowerCAmelCase : Any = image_size
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : List[Any] = use_labels
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Optional[Any] = backbone_out_indices
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : List[Any] = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Tuple = num_labels
lowerCAmelCase : Any = backbone_featmap_shape
lowerCAmelCase : List[str] = scope
lowerCAmelCase : List[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase : Any = (image_size // patch_size) ** 2
lowerCAmelCase : List[str] = num_patches + 1
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : List[Any] = None
if self.use_labels:
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCamelCase_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = DPTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Dict = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : List[Any] = DPTForDepthEstimation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : int = model(UpperCamelCase_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : Optional[int] = DPTForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : int = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__UpperCamelCase = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Any = DPTModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def lowerCamelCase__ ( self : int ):
pass
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : str = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(UpperCamelCase_ )
lowerCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = True
if model_class in get_values(UpperCamelCase_ ):
continue
lowerCAmelCase : List[str] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
lowerCAmelCase : Dict = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
lowerCAmelCase : int = model(**UpperCamelCase_ ).loss
loss.backward()
def lowerCamelCase__ ( self : int ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase, lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Optional[int] = True
if model_class in get_values(UpperCamelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase : int = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase_ ).loss
loss.backward()
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Dict = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase : int = model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
lowerCAmelCase : Tuple = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowerCAmelCase : Any = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase__ ( self : List[str] ):
pass
@slow
def lowerCamelCase__ ( self : List[str] ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowerCAmelCase : Any = DPTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
lowerCAmelCase, lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : List[Any] = '''add'''
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : Optional[int] = DPTForDepthEstimation(UpperCamelCase_ )
def _snake_case ( ):
lowerCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Tuple = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
lowerCAmelCase : Optional[Any] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(UpperCamelCase_ )
lowerCAmelCase : str = prepare_img()
lowerCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**UpperCamelCase_ )
lowerCAmelCase : Tuple = outputs.predicted_depth
# verify the predicted depth
lowerCAmelCase : List[Any] = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , UpperCamelCase_ )
lowerCAmelCase : Dict = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , UpperCamelCase_ , atol=1E-4 ) )
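# Hedged standalone sketch mirroring the depth-estimation integration test above; it
# requires the Intel/dpt-hybrid-midas checkpoint and the fixture image path used by the test.
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    depth = model(**processor(images=image, return_tensors="pt")).predicted_depth
print(depth.shape)  # torch.Size([1, 384, 384]), per the test above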
| 60 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 60 | 1 |
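The `accepts_eta` check in the pipeline above is a reusable trick: introspect the callee's signature and forward only the keyword arguments it declares, so one call site can serve schedulers with different `step()` signatures. A self-contained sketch (the function names are illustrative):

```python
import inspect


def call_with_supported_kwargs(fn, *args, **kwargs):
    # Drop any keyword argument the callee does not declare.
    supported = set(inspect.signature(fn).parameters)
    return fn(*args, **{k: v for k, v in kwargs.items() if k in supported})


def step(sample, timestep, eta=0.0):
    return sample, timestep, eta


print(call_with_supported_kwargs(step, 1, 2, eta=0.5, unknown_flag=True))  # (1, 2, 0.5)
```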
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 371 |
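The Flax test above is a golden-value test: it compares a small slice of the model output against hard-coded reference numbers with an absolute tolerance instead of exact equality, since float results vary across hardware and library versions. The same pattern in plain NumPy (the numbers below are made up for illustration):

```python
import numpy as np

expected_slice = np.array([[-0.0101, 0.1218, -0.0803]])
model_output = np.array([[-0.0100, 0.1219, -0.0805]])  # stand-in for a real forward pass
# atol bounds the absolute elementwise difference; exact float equality is too brittle.
assert np.allclose(model_output, expected_slice, atol=1e-3)
```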
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 86 | 0 |
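A quick usage check of the bisection helpers above on a list with duplicates, showing how `bisect_left` and `bisect_right` bracket a run of equal values (run with the definitions above in scope):

```python
data = [1, 3, 3, 3, 5, 8]
assert bisect_left(data, 3) == 1   # first index at which 3 could be inserted
assert bisect_right(data, 3) == 4  # one past the last 3
assert binary_search(data, 5) == 4
assert binary_search(data, 4) is None
insort_left(data, 4)
assert data == [1, 3, 3, 3, 4, 5, 8]
```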
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | 1 |
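Both init files above route imports through `_LazyModule`, so heavy submodules are loaded only when one of their symbols is first accessed. A minimal sketch of the underlying idea using module-level `__getattr__` (PEP 562); the package and symbol names here are hypothetical:

```python
# mini_pkg/__init__.py — deferred imports via PEP 562 (hypothetical package)
import importlib

_import_structure = {"modeling": ["MiniModel"], "tokenization": ["MiniTokenizer"]}


def __getattr__(name):
    for submodule, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

Nothing under `mini_pkg.modeling` is imported until `mini_pkg.MiniModel` is first touched, which keeps `import mini_pkg` cheap even when optional backends such as torch are installed.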
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class __magic_name__ ( __lowerCAmelCase):
A: Union[str, Any] = VOCAB_FILES_NAMES
A: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A: Dict = ["input_ids", "attention_mask"]
def __init__( self : Any , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : str="<|endoftext|>" , lowerCamelCase__ : Union[str, Any]="<|endoftext|>" , lowerCamelCase__ : Any="<|endoftext|>" , lowerCamelCase__ : Tuple=False , **lowerCamelCase__ : Tuple , ) -> List[str]:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCamelCase__ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase__ ) != add_prefix_space:
UpperCamelCase__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop('''type''' ) )
UpperCamelCase__ : str = add_prefix_space
UpperCamelCase__ : str = pre_tok_class(**lowerCamelCase__ )
UpperCamelCase__ : Dict = add_prefix_space
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
UpperCamelCase__ : Tuple = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : "Conversation" ) -> List[int]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] )
if len(lowerCamelCase__ ) > self.model_max_length:
UpperCamelCase__ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
| 51 |
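The `_build_conversation_input_ids` method above concatenates each turn's token ids, appends the EOS id after every turn, and keeps only the most recent `model_max_length` tokens. The same logic as a standalone function with a toy tokenizer (all names here are illustrative):

```python
def build_conversation_input_ids(encode, eos_token_id, turns, model_max_length):
    input_ids = []
    for text in turns:
        input_ids.extend(encode(text) + [eos_token_id])
    # Truncate from the left so the most recent turns survive.
    return input_ids[-model_max_length:]


fake_encode = lambda text: [len(word) for word in text.split()]  # toy "tokenizer"
print(build_conversation_input_ids(fake_encode, 0, ["hi there", "hello"], 8))  # [2, 5, 0, 5, 0]
```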
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def _a ( SCREAMING_SNAKE_CASE : List[DatasetType] , SCREAMING_SNAKE_CASE : Optional[List[float]] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[DatasetInfo] = None , SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(SCREAMING_SNAKE_CASE ):
if not isinstance(SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ):
if isinstance(SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(SCREAMING_SNAKE_CASE )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(SCREAMING_SNAKE_CASE ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(SCREAMING_SNAKE_CASE ).__name__}." )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ : List[Any] = (
(Dataset, IterableDataset) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset)
)
elif not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , info=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE , stopping_strategy=SCREAMING_SNAKE_CASE )
else:
return _interleave_iterable_datasets(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , info=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE , stopping_strategy=SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : List[DatasetType] , SCREAMING_SNAKE_CASE : Optional[DatasetInfo] = None , SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE : int = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(SCREAMING_SNAKE_CASE ):
if not isinstance(SCREAMING_SNAKE_CASE , (Dataset, IterableDataset) ):
if isinstance(SCREAMING_SNAKE_CASE , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(SCREAMING_SNAKE_CASE )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(SCREAMING_SNAKE_CASE ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(SCREAMING_SNAKE_CASE ).__name__}." )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ : List[str] = (
(Dataset, IterableDataset) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else (IterableDataset, Dataset)
)
elif not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(SCREAMING_SNAKE_CASE , info=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE , axis=SCREAMING_SNAKE_CASE )
else:
return _concatenate_iterable_datasets(SCREAMING_SNAKE_CASE , info=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE , axis=SCREAMING_SNAKE_CASE )
| 51 | 1 |
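Typical usage of the public entry point whose validation logic is shown above, assuming the standard `datasets` API:

```python
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

# "first_exhausted" (the default) stops when the smaller dataset runs out;
# "all_exhausted" oversamples so every dataset is fully consumed.
mixed = interleave_datasets(
    [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
)
print(mixed["text"])
```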
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
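A quick check of the functions above, with R ≈ 0.0821 L·atm/(mol·K): 1 mol of an ideal gas at 300 K in a 10 L vessel exerts P = nRT/V ≈ 2.46 atm, which the rounding in `moles_to_pressure` reports as 2 (these names follow the reconstruction above):

```python
assert molarity_to_normality(2, 4, 2) == 4  # (4 mol / 2 L) * n-factor 2
assert moles_to_pressure(volume=10, moles=1, temperature=300) == 2    # ~2.46 atm, rounded
assert moles_to_volume(pressure=2, moles=1, temperature=300) == 12    # ~12.3 L, rounded
assert pressure_and_volume_to_temperature(pressure=2, moles=1, volume=10) == 244  # ~243.6 K
```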
| 239 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __magic_name__ :
def __init__( self : str , lowercase_ : Dict ):
if isinstance(lowercase_ , lowercase_ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowercase_ : List[Any] = deepcopy(lowercase_ )
elif os.path.exists(lowercase_ ):
with io.open(lowercase_ , """r""" , encoding="""utf-8""" ) as f:
lowercase_ : Union[str, Any] = json.load(lowercase_ )
else:
try:
                lowercase_ : int = base64.urlsafe_b64decode(lowercase_ ).decode("""utf-8""" )
lowercase_ : str = json.loads(lowercase_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
lowercase_ : Any = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowercase_ : Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowercase_ : str = False
if self.is_zeroa() or self.is_zeroa():
lowercase_ : Dict = set(["""cpu""", """nvme"""] )
lowercase_ : List[Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowercase_ : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Any ):
lowercase_ : Optional[Any] = self.config
# find the config node of interest if it exists
lowercase_ : Tuple = ds_key_long.split(""".""" )
lowercase_ : Union[str, Any] = nodes.pop()
for node in nodes:
lowercase_ : List[str] = config.get(lowercase_ )
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : List[str]=None ):
lowercase_ , lowercase_ : List[Any] = self.find_config_node(lowercase_ )
if config is None:
return default
return config.get(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : int=False ):
lowercase_ : int = self.config
# find the config node of interest if it exists
lowercase_ : Dict = ds_key_long.split(""".""" )
for node in nodes:
lowercase_ : List[Any] = config
lowercase_ : Dict = config.get(lowercase_ )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Tuple ):
lowercase_ : str = self.get_value(lowercase_ )
return False if value is None else bool(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Union[str, Any] = self.get_value(lowercase_ )
return False if value is None else not bool(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return self._offload
class __magic_name__ :
def __init__( self : Any , lowercase_ : Union[str, Any] ):
lowercase_ : Any = engine
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , **lowercase_ : str ):
# runs backpropagation and handles mixed precision
self.engine.backward(lowercase_ , **lowercase_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Optional[Any] , lowercase_ : Tuple ):
super().__init__(lowercase_ , device_placement=lowercase_ , scaler=lowercase_ )
lowercase_ : Any = hasattr(self.optimizer , """overflow""" )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Tuple=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple ):
super().__init__(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __magic_name__ :
def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str]=0.0_01 , lowercase_ : List[str]=0 , **lowercase_ : List[Any] ):
lowercase_ : str = params
lowercase_ : List[Any] = lr
lowercase_ : int = weight_decay
lowercase_ : Union[str, Any] = kwargs
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str]=None , lowercase_ : int=0 , **lowercase_ : int ):
lowercase_ : Union[str, Any] = optimizer
lowercase_ : List[str] = total_num_steps
lowercase_ : Dict = warmup_num_steps
lowercase_ : Dict = kwargs
| 239 | 1 |
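The DeepSpeed wrapper above repeatedly walks a nested JSON config with dotted keys such as `zero_optimization.stage`. Stripped of the class machinery, the lookup reduces to a short helper (a standalone sketch, not the class method itself):

```python
def get_by_dotted_key(config: dict, dotted_key: str, default=None):
    node = config
    for part in dotted_key.split("."):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node


cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_by_dotted_key(cfg, "zero_optimization.stage") == 3
assert get_by_dotted_key(cfg, "zero_optimization.offload_param.device") == "cpu"
assert get_by_dotted_key(cfg, "optimizer.type", default="adamw") == "adamw"
```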
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
__A : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__A : Tuple = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : str , A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
for attribute in key.split(""".""" ):
lowerCAmelCase_ : Optional[int] = getattr(A__ , A__ )
if weight_type is not None:
lowerCAmelCase_ : Union[str, Any] = getattr(A__ , A__ ).shape
else:
lowerCAmelCase_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCAmelCase_ : Tuple = value
elif weight_type == "weight_g":
lowerCAmelCase_ : Optional[int] = value
elif weight_type == "weight_v":
lowerCAmelCase_ : Tuple = value
elif weight_type == "bias":
lowerCAmelCase_ : List[str] = value
elif weight_type == "running_mean":
lowerCAmelCase_ : str = value
elif weight_type == "running_var":
lowerCAmelCase_ : List[str] = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase_ : Union[str, Any] = value
elif weight_type == "inv_freq":
lowerCAmelCase_ : Optional[Any] = value
else:
lowerCAmelCase_ : int = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : Any , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : Tuple = fairseq_model.state_dict()
lowerCAmelCase_ : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase_ : int = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == """group""" , )
lowerCAmelCase_ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase_ : Dict = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowerCAmelCase_ : int = True
if "*" in mapped_key:
lowerCAmelCase_ : Optional[int] = name.split(A__ )[0].split(""".""" )[-2]
lowerCAmelCase_ : List[str] = mapped_key.replace("""*""" , A__ )
if "pos_bias_u" in name:
lowerCAmelCase_ : List[str] = None
elif "pos_bias_v" in name:
lowerCAmelCase_ : Any = None
elif "weight_g" in name:
lowerCAmelCase_ : str = """weight_g"""
elif "weight_v" in name:
lowerCAmelCase_ : Tuple = """weight_v"""
elif "bias" in name:
lowerCAmelCase_ : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase_ : Optional[Any] = """weight"""
elif "running_mean" in name:
lowerCAmelCase_ : List[str] = """running_mean"""
elif "inv_freq" in name:
lowerCAmelCase_ : Union[str, Any] = """inv_freq"""
elif "running_var" in name:
lowerCAmelCase_ : Any = """running_var"""
elif "num_batches_tracked" in name:
lowerCAmelCase_ : Tuple = """num_batches_tracked"""
else:
lowerCAmelCase_ : Any = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCamelCase_ ( A__ : Tuple , A__ : Any , A__ : List[Any] , A__ : str , A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = full_name.split("""conv_layers.""" )[-1]
lowerCAmelCase_ : int = name.split(""".""" )
lowerCAmelCase_ : Dict = int(items[0] )
lowerCAmelCase_ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
lowerCAmelCase_ : Optional[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
lowerCAmelCase_ : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
lowerCAmelCase_ : Tuple = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
lowerCAmelCase_ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(A__ )
@torch.no_grad()
def UpperCamelCase_ ( A__ : Dict , A__ : Dict , A__ : Dict=None , A__ : List[Any]=None , A__ : Any=True ):
'''simple docstring'''
if config_path is not None:
lowerCAmelCase_ : List[str] = WavaVecaConformerConfig.from_pretrained(A__ , hidden_act="""swish""" )
else:
lowerCAmelCase_ : int = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCAmelCase_ : str = """rotary"""
if is_finetuned:
if dict_path:
lowerCAmelCase_ : Optional[Any] = Dictionary.load(A__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase_ : Optional[int] = target_dict.pad_index
lowerCAmelCase_ : List[str] = target_dict.bos_index
lowerCAmelCase_ : List[Any] = target_dict.eos_index
lowerCAmelCase_ : Optional[Any] = len(target_dict.symbols )
lowerCAmelCase_ : Optional[int] = os.path.join(A__ , """vocab.json""" )
if not os.path.isdir(A__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(A__ ) )
return
os.makedirs(A__ , exist_ok=A__ )
lowerCAmelCase_ : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[str] = 1
with open(A__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(A__ , A__ )
lowerCAmelCase_ : List[str] = WavaVecaCTCTokenizer(
A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=A__ , )
lowerCAmelCase_ : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
lowerCAmelCase_ : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
lowerCAmelCase_ : str = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
processor.save_pretrained(A__ )
lowerCAmelCase_ : Dict = WavaVecaConformerForCTC(A__ )
else:
lowerCAmelCase_ : List[Any] = WavaVecaConformerForPreTraining(A__ )
if is_finetuned:
lowerCAmelCase_ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
lowerCAmelCase_ : Dict = argparse.Namespace(task="""audio_pretraining""" )
lowerCAmelCase_ : str = fairseq.tasks.setup_task(A__ )
lowerCAmelCase_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A__ )
lowerCAmelCase_ : int = model[0].eval()
recursively_load_weights(A__ , A__ , not is_finetuned )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 371 |
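The conversion script above revolves around one idea: translate each fairseq state-dict key through a substring mapping, with `*` standing in for a layer index, then copy the tensor into the matching HF parameter. A heavily reduced sketch of just the key-translation step; weight-type suffix handling is omitted and the mapping entry is illustrative:

```python
import re


def translate_key(old_key: str, mapping: dict[str, str]) -> str:
    for src, dst in mapping.items():
        if src in old_key:
            if "*" in dst:
                # Recover the layer index from the old key and splice it in.
                layer = re.search(r"\.(\d+)\.", old_key)
                if layer:
                    dst = dst.replace("*", layer.group(1))
            return dst
    return old_key  # keys with no rule pass through unchanged


mapping = {"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q"}
print(translate_key("encoder.layers.7.self_attn.linear_q.weight", mapping))
# encoder.layers.7.self_attn.linear_q
```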
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 89 | 0 |
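For example, the first five terms with p = 2 (the Basel series, which converges to π²/6):

```python
print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
```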
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    '''simple docstring'''

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
UpperCamelCase_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
UpperCamelCase_ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
UpperCamelCase_ = SegmentTree(test_array, min)
UpperCamelCase_ = SegmentTree(test_array, max)
UpperCamelCase_ = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
UpperCamelCase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 345 |
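Range queries and point updates on the `SegmentTree` above are both O(log n). A small demonstration (indices are 0-based and the query range is inclusive):

```python
arr = [5, 1, 4, 2]
st = SegmentTree(arr, min)
assert st.query(0, 3) == 1
assert st.query(2, 3) == 2
st.update(1, 7)  # the array conceptually becomes [5, 7, 4, 2]
assert st.query(0, 3) == 2
```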
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 307 | 0 |
def solution(n: int = 1000) -> int:
    """simple docstring"""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 44 |
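The loop above counts digits term by term. For large n there is a closed-form alternative based on Binet's formula F(k) ≈ φᵏ/√5: the first k with n digits satisfies k·log₁₀φ − log₁₀√5 ≥ n − 1. A sketch of that estimate; it agrees with the loop for n = 1000, giving index 4782:

```python
import math


def solution_closed_form(n: int = 1000) -> int:
    phi = (1 + math.sqrt(5)) / 2
    # Smallest k with k * log10(phi) - log10(sqrt(5)) >= n - 1.
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))


assert solution_closed_form(1000) == 4782
```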
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a ( unittest.TestCase ):
def __init__( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Tuple=7 ,__lowercase :Optional[Any]=3 ,__lowercase :Dict=3_0 ,__lowercase :Union[str, Any]=4_0_0 ,__lowercase :Optional[int]=True ,__lowercase :int=None ,__lowercase :int=0.9 ,__lowercase :Optional[int]=None ,__lowercase :Dict=True ,__lowercase :str=[0.5, 0.5, 0.5] ,__lowercase :str=[0.5, 0.5, 0.5] ,):
snake_case__ : List[Any] = size if size is not None else {'''shortest_edge''': 3_0}
snake_case__ : Any = crop_size if crop_size is not None else {'''height''': 3_0, '''width''': 3_0}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[Any] = min_resolution
snake_case__ : int = max_resolution
snake_case__ : str = do_resize_and_center_crop
snake_case__ : Dict = size
snake_case__ : Union[str, Any] = crop_pct
snake_case__ : List[str] = crop_size
snake_case__ : Optional[Any] = do_normalize
snake_case__ : Tuple = image_mean
snake_case__ : List[str] = image_std
def __lowerCamelCase ( self :Any ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Any = PoolFormerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Tuple = PoolFormerImageProcessingTester(self )
@property
def __lowerCamelCase ( self :Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(__lowercase ,'''size''' ) )
self.assertTrue(hasattr(__lowercase ,'''crop_pct''' ) )
self.assertTrue(hasattr(__lowercase ,'''do_normalize''' ) )
self.assertTrue(hasattr(__lowercase ,'''image_mean''' ) )
self.assertTrue(hasattr(__lowercase ,'''image_std''' ) )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 3_0} )
self.assertEqual(image_processor.crop_size ,{'''height''': 3_0, '''width''': 3_0} )
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
def __lowerCamelCase ( self :int ):
pass
def __lowerCamelCase ( self :Optional[Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
snake_case__ : Union[str, Any] = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCamelCase ( self :List[str] ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,np.ndarray )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
snake_case__ : str = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCamelCase ( self :Optional[Any] ):
# Initialize image_processing
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,torch.Tensor )
# Test not batched input
snake_case__ : Dict = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
snake_case__ : str = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
| 44 | 1 |
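The `crop_pct` parameter exercised above encodes the usual "resize then center crop" evaluation transform: the image is first resized so that the final crop covers only a central fraction of it, then the target patch is cut from the middle. A rough standalone sketch with PIL, in the spirit of the timm-style recipe; the exact interpolation and rounding in the real processor may differ:

```python
from PIL import Image


def resize_and_center_crop(img: Image.Image, size: int = 30, crop_pct: float = 0.9) -> Image.Image:
    # Scale the shortest edge to size / crop_pct, then crop the central size x size patch.
    scale_size = int(size / crop_pct)
    w, h = img.size
    short = min(w, h)
    img = img.resize((round(w * scale_size / short), round(h * scale_size / short)))
    left = (img.width - size) // 2
    top = (img.height - size) // 2
    return img.crop((left, top, left + size, top + size))


out = resize_and_center_crop(Image.new("RGB", (400, 300)), size=30, crop_pct=0.9)
assert out.size == (30, 30)
```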
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCamelCase__ ( _A , _A , _A , _A ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCamelCase__ ( _A , _A , _A , _A , _A=True ):
model.train()
a : str = model(UpperCAmelCase_ )
a : List[str] = F.mse_loss(UpperCAmelCase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCAmelCase_ )
def lowerCamelCase__ ( _A , _A=False ):
set_seed(42 )
a : List[Any] = RegressionModel()
a : Any = deepcopy(UpperCAmelCase_ )
a : Tuple = RegressionDataset(length=80 )
a : Tuple = DataLoader(UpperCAmelCase_ , batch_size=16 )
model.to(accelerator.device )
if sched:
a : str = AdamW(params=model.parameters() , lr=1E-3 )
a : str = AdamW(params=ddp_model.parameters() , lr=1E-3 )
a : List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda _A : epoch**0.65 )
a : List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda _A : epoch**0.65 )
# Make a copy of `model`
if sched:
a : List[Any] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
a : str = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCamelCase__ ( _A ):
a : str = get_training_setup(UpperCAmelCase_ )
# Use a single batch
a : Dict = next(iter(UpperCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a : int = accelerator.gather((ddp_input, ddp_target) )
a : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# Sync grads
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a : Union[str, Any] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )]
def lowerCamelCase__ ( _A ):
a : List[str] = get_training_setup(UpperCAmelCase_ )
# Use a single batch
a : List[str] = next(iter(UpperCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a : List[Any] = accelerator.gather((ddp_input, ddp_target) )
a : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# Sync grads
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a : Any = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )]
def lowerCamelCase__ ( _A=False , _A=False ):
a : Optional[int] = Accelerator(
split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a : List[str] = get_training_setup(UpperCAmelCase_ )
for iteration, batch in enumerate(UpperCAmelCase_ ):
a : List[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
a : List[str] = accelerator.gather((ddp_input, ddp_target) )
a : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCAmelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a : List[str] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )]
GradientState._reset_state()
def lowerCamelCase__ ( _A=False , _A=False ):
a : Optional[Any] = Accelerator(
split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a : Optional[Any] = get_training_setup(UpperCAmelCase_ , UpperCAmelCase_ )
for iteration, batch in enumerate(UpperCAmelCase_ ):
a : int = batch.values()
# Gather the distributed inputs and targs for the base model
a : List[str] = accelerator.gather((ddp_input, ddp_target) )
a : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCAmelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCAmelCase_ ):
step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
a : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCAmelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def lowerCamelCase__ ( ):
a : Optional[Any] = Accelerator()
a : int = RegressionDataset(length=80 )
a : List[str] = DataLoader(UpperCAmelCase_ , batch_size=16 )
a : List[Any] = RegressionDataset(length=96 )
a : Any = DataLoader(UpperCAmelCase_ , batch_size=16 )
a : Optional[int] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ )
if iteration < len(UpperCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ )
if batch_num < len(UpperCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                '`split_batches=False`, `dispatch_batches=False`**',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
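
# Hedged illustration (not part of the original test script) of the core pattern these
# tests exercise: under `accelerator.accumulate(model)`, Accelerate skips the gradient
# sync on non-sync steps, so step()/zero_grad() can be called every batch. The
# model/optimizer/dataloader arguments are stand-ins for any HF-style setup where
# `model(**batch)` returns an object with a `.loss` attribute.
def train_with_accumulation(model, optimizer, dataloader, accumulation_steps=2):
    accelerator = Accelerator(gradient_accumulation_steps=accumulation_steps)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    model.train()
    for batch in dataloader:
        with accelerator.accumulate(model):
            loss = model(**batch).loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
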
if __name__ == "__main__":
main() | 297 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
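
# Hedged companion sketch (not in the original file): flipping the two differences
# called out in the comments above -- pop the FIRST element and use a queue -- turns
# the same loop into breadth-first search.
def breadth_first_search(graph: dict, start: str) -> set:
    from collections import deque

    explored, queue = set(start), deque([start])
    while queue:
        v = queue.popleft()
        explored.add(v)
        for adj in graph[v]:
            if adj not in explored:
                queue.append(adj)
    return explored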
| 94 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip('SegFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
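
# Hedged usage sketch (not part of the test file above): plain semantic-segmentation
# inference with the checkpoint the integration tests use; assumes Hub access and
# that torch and vision are available.
def run_segformer_inference(image):
    processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    seg_model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = seg_model(**inputs).logits  # (1, num_labels, height // 4, width // 4)
    return logits.argmax(dim=1)  # per-pixel class ids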
| 354 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env, ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create our conditions
        obs = self.normalize(obs, 'observations')
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions')
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
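
# Hedged illustration (not part of the pipeline above) of the guidance update inside
# run_diffusion: ascend the gradient of a learned value estimate, scaled by the
# scheduler's posterior std. `value_fn` is a stand-in for any callable that maps a
# batch of trajectories to one scalar score each.
def guided_ascent_step(x, value_fn, scale, posterior_variance):
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        y = value_fn(x)
        grad = torch.autograd.grad([y.sum()], [x])[0]
    return (x + scale * torch.exp(0.5 * posterior_variance) * grad).detach()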
| 302 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85, ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index and assign it to a duplicate cluster if close duplicates exist."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, """w""" ) as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100, ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
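
# Hedged mini-example (not part of the original script): token-level Jaccard
# similarity as used for the extremes filtering below; identical snippets score 1.0.
def _jaccard_demo():
    assert jaccard_similarity("def f(x): return x", "def f(x): return x") == 1.0
    assert jaccard_similarity("def f(x): return x", "def g(y): return y") < 1.0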
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Find a reduced cluster such that each code in the original cluster is similar to at least one code in it."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared in parallel, sharing the dataset through a global variable."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F"Original dataset size: {len(dataset)}")
    print(F"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(F"Files in duplicate cluster: {len(duplicate_indices)}")
    print(F"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(F"Filtered dataset size: {len(ds_filter)}")
return ds_filter, duplicate_clusters | 233 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="""pt""" ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="""pt""" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, """""" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, """""" )
        sequence = re.sub(r"""<.*?>""", """""", sequence, count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
return sequence["answer"] | 233 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
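
# Hedged usage sketch (not part of the module above): encoding a sentence pair with
# the fast tokenizer; assumes Hub access for the checkpoint.
if __name__ == "__main__":
    tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    enc = tok("first sentence", "second sentence")
    # token_type_ids follow create_token_type_ids_from_sequences above: 0s for the
    # first segment (incl. [CLS]/[SEP]), 1s for the second.
    print(enc["input_ids"], enc["token_type_ids"])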
| 371 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
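
# Hedged usage sketch (not part of the test file above): the same pipeline outside
# unittest, on CPU; assumes Hub access and a PIL image `init_image`.
def run_onnx_img2img(init_image, prompt="A fantasy landscape, trending on artstation"):
    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
    )
    pipe.set_progress_bar_config(disable=None)
    return pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images[0]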
| 213 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
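
# Hedged usage sketch (not part of the test classes): StableDiffusionSAGPipeline is
# called like the regular text-to-image pipeline plus a `sag_scale` knob (0 disables
# self-attention guidance). Assumes Hub access and a CUDA device.
def run_sag(prompt="a photo of an astronaut riding a horse"):
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    pipe = pipe.to("cuda")
    return pipe(prompt, guidance_scale=7.5, sag_scale=0.75).images[0]
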
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , width=7_68 , height=5_12 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
        image = output.images
assert image.shape == (1, 5_12, 7_68, 3) | 297 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('examples', 'by_feature'))
        examples_path = os.path.abspath('examples')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section='main()' if parser_only else 'training_function()', ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename)
                        diff = '\n'.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '')
                        self.assertEqual(diff, '')

    def test_nlp_examples(self):
        self.one_complete_example('complete_nlp_example.py', True)
        self.one_complete_example('complete_nlp_example.py', False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('examples', 'cv_example.py'))
        special_strings = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py', True, cv_path, special_strings)
        self.one_complete_example('complete_cv_example.py', False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = F"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))

    def test_checkpointing_by_steps(self):
        testargs = F"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))

    def test_load_states_by_epoch(self):
        testargs = F"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)

    def test_load_states_by_steps(self):
        testargs = F"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)

    @slow
    def test_cross_validation(self):
        testargs = '\n    examples/by_feature/cross_validation.py\n    --num_folds 2\n    '.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)

    def test_multi_process_metrics(self):
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))

    def test_gradient_accumulation(self):
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs ) | 297 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
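
# Hedged usage sketch (not part of the builder above): AudioFolder is normally
# reached through `load_dataset`; "path/to/folder" is a placeholder directory of
# audio files, optionally arranged one sub-folder per class label.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("audiofolder", data_dir="path/to/folder")
    print(ds)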
| 40 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''')
        return distances[finish_vertex]
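
# Hedged usage example (not in the original file): a four-vertex graph where the
# cheapest 0 -> 3 route takes three zero-weight edges instead of the direct
# weight-1 edge, so the 0-1 BFS returns distance 0.
def _demo() -> None:
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 0)
    g.add_edge(2, 3, 0)
    g.add_edge(0, 3, 1)
    assert g.get_shortest_path(0, 3) == 0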
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__magic_name__: List[str] = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 342 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length, remainder, digits, length):
    """Count reversible numbers of the given length (Project Euler 145):
    n + reverse(n) must consist entirely of odd digits."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers for every length up to max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 | 1 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over 2D control points, evaluated via Bernstein basis polynomials."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
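    # --- Added numeric check (no plotting): the quadratic through these control
    # points starts at (0, 0), ends at (5, 0), and at t=0.5 the Bernstein basis is
    # [0.25, 0.5, 0.25], so the point is pulled toward the middle control point.
    curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
    print(curve.bezier_curve_function(0.5))  # (3.75, 2.5)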
| 357 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]:
"""simple docstring"""
def get_masked_lm_array(_snake_case : str ):
__snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : str = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Any = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_array(_snake_case : str ):
__snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_layer_array(_snake_case : int , _snake_case : str ):
__snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[Any] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ):
__snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case )
__snake_case : int = array.reshape(_snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
print(f"""Loading model based on config from {config_path}...""" )
__snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case )
__snake_case : Dict = BertForMaskedLM(_snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : int = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__snake_case : List[Any] = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : Dict = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' )
__snake_case : Any = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' )
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' )
__snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' )
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' )
__snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' )
# Embeddings
__snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' )
__snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' )
__snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' )
__snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__snake_case : Optional[Any] = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('''dense/kernel''' )
__snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' )
__snake_case : str = get_masked_lm_array('''layer_norm/gamma''' )
__snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' )
__snake_case : Tuple = get_masked_lm_array('''embedding_table''' )
# Pooling
__snake_case : Optional[Any] = BertPooler(config=_snake_case )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(_snake_case )
# Integration test - should load without any errors ;)
__snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
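# --- Added usage note ---------------------------------------------------------
# Illustrative invocation; the script file name is an assumption and all paths
# are placeholders, but the flags match the argparse definitions above:
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir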
| 24 | 0 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself,
    e.g. 5 -> 25, 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
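    # --- Added demonstration: squares of automorphic numbers end in the number itself.
    for n in (0, 1, 5, 6, 7, 25, 76, 376):
        print(n, is_automorphic_number(n))  # 7 is the lone False here: 7**2 = 49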
| 93 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """A small mT5 integration test; the expected score comes from running the
        reference T5 implementation on the same inputs."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 93 | 1 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
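# --- Added usage sketch ---------------------------------------------------------
# Hypothetical driver code; the model name and data path are placeholders:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     batch = train_dataset[0]  # dict with input_ids / attention_mask / start+end positions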
| 195 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
)
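# --- Added note -----------------------------------------------------------------
# Effect of the `_LazyModule` indirection, illustratively: the heavy framework
# imports above only run on first attribute access, e.g.
#     from transformers.models.mt5 import MT5ForConditionalGeneration
# triggers the torch branch at first use rather than at package import time.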
| 195 | 1 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 86 |
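# --- Added usage note ---------------------------------------------------------
# Illustrative invocation; the script file name is an assumption and the paths
# are placeholders, but the flags match the argparse definitions above:
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin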
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : Tuple = TFAutoModelForSeq2SeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
            __lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeq2SeqLM.from_pretrained(
                _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : int = AutoModelForSeq2SeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
            __lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeq2SeqLM.from_pretrained(
                _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
@require_cuda
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Optional[Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowercase_ ):
lowerCAmelCase__ : List[str] = Accelerator(cpu=lowercase_ )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : List[Any] = Accelerator()
lowerCAmelCase__ : List[str] = GradientState()
assert state.num_steps == 1
lowerCAmelCase__ : List[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCAmelCase__ : Union[str, Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : str = Accelerator()
lowerCAmelCase__ : Optional[Any] = create_components()
(
lowerCAmelCase__
) : Optional[int] = accelerator.prepare(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Dict = Accelerator()
lowerCAmelCase__ : int = create_components()
accelerator.prepare(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __lowerCAmelCase ( self : List[Any] ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowercase_ : List[Any] ,**lowercase_ : Union[str, Any] ):
pass
with patch('''torch.cuda.set_device''' ,lowercase_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
lowerCAmelCase__ : Tuple = Accelerator()
self.assertEqual(str(accelerator.state.device ) ,'''cuda:64''' )
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : List[Any] = Accelerator()
lowerCAmelCase__ : str = create_components()
accelerator.prepare(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : Any = get_signature(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase_ )
# make sure random weights don't match
load_random_weights(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) < 1E-3 )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : List[Any] = Accelerator()
lowerCAmelCase__ : int = create_components()
accelerator.prepare(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : Dict = get_signature(lowercase_ )
# saving hook
def save_config(lowercase_ : Union[str, Any] ,lowercase_ : List[str] ,lowercase_ : Tuple ):
lowerCAmelCase__ : List[str] = {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(lowercase_ ,'''data.json''' ) ,'''w''' ) as f:
json.dump(lowercase_ ,lowercase_ )
# loading hook
def load_config(lowercase_ : Optional[Any] ,lowercase_ : Optional[Any] ):
with open(os.path.join(lowercase_ ,'''data.json''' ) ,'''r''' ) as f:
lowerCAmelCase__ : Tuple = json.load(lowercase_ )
lowerCAmelCase__ : str = config["""class_name"""]
lowerCAmelCase__ : Union[str, Any] = accelerator.register_save_state_pre_hook(lowercase_ )
lowerCAmelCase__ : Optional[int] = accelerator.register_load_state_pre_hook(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase_ )
# make sure random weights don't match with hooks
load_random_weights(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowerCAmelCase__ : Tuple = """random"""
# make sure loaded weights match with hooks
accelerator.load_state(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase_ )
# make sure random weights don't match with hooks removed
load_random_weights(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowerCAmelCase__ : str = """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : List[Any] = Accelerator()
lowerCAmelCase__ : List[str] = create_components()
lowerCAmelCase__ : str = None
# This should work
lowerCAmelCase__ : Any = accelerator.prepare(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
self.assertTrue(dummy_obj is None )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Dict = Accelerator()
lowerCAmelCase__ : Union[str, Any] = create_components()
lowerCAmelCase__ : List[str] = [1, 2, 3]
# This should work
lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
self.assertEqual(
getattr(lowercase_ ,'''_is_accelerate_prepared''' ,lowercase_ ) ,lowercase_ ,'''Dummy object should have `_is_accelerate_prepared` set to `True`''' ,)
self.assertEqual(
getattr(lowercase_ ,'''_is_accelerate_prepared''' ,lowercase_ ) ,lowercase_ ,'''Model is missing `_is_accelerator_prepared` or is set to `False`''' ,)
self.assertEqual(
getattr(lowercase_ ,'''_is_accelerate_prepared''' ,lowercase_ ) ,lowercase_ ,'''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' ,)
self.assertEqual(
getattr(lowercase_ ,'''_is_accelerate_prepared''' ,lowercase_ ) ,lowercase_ ,'''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' ,)
self.assertEqual(
getattr(lowercase_ ,'''_is_accelerate_prepared''' ,lowercase_ ) ,lowercase_ ,'''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' ,)
self.assertEqual(
getattr(lowercase_ ,'''_is_accelerate_prepared''' ,lowercase_ ) ,lowercase_ ,'''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' ,)
@slow
@require_bnb
def __lowerCAmelCase ( self : List[str] ):
from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
def __lowerCAmelCase ( self : List[str] ):
from transformers import AutoModelForCausalLM
        accelerator = Accelerator()

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
def __lowerCAmelCase ( self : Dict ):
from transformers import AutoModelForCausalLM
lowerCAmelCase__ : str = {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCAmelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' ,)
model.tie_weights()
lowerCAmelCase__ : List[Any] = infer_auto_device_map(lowercase_ )
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' ,load_in_abit=lowercase_ ,device_map=lowercase_ ,)
lowerCAmelCase__ : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(lowercase_ ):
lowerCAmelCase__ : int = accelerator.prepare(lowercase_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __lowerCAmelCase ( self : Tuple ):
from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )

        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@require_cuda
def __lowerCAmelCase ( self : List[str] ):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        optimizer = accelerator.prepare(optimizer)
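# --- Added usage sketch ---------------------------------------------------------
# The training-loop shape these tests exercise, as a minimal sketch built on the
# toy components above (the forward pass is a placeholder, not a real workload):
#
#     accelerator = Accelerator()
#     model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(*create_components())
#     for _ in train_dl:
#         optimizer.zero_grad()
#         loss = model(torch.ones(1, 2)).sum()  # placeholder forward
#         accelerator.backward(loss)
#         optimizer.step()
#         scheduler.step()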
| 356 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a default Accelerate config for the local machine, returning the path
    on success and False when a config already exists at `save_location`."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
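# --- Added usage sketch ---------------------------------------------------------
# Programmatic use (the save path is a placeholder):
#     write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_default.json")
# On a single-GPU machine this writes num_processes=1 with distributed_type "NO".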
| 74 | 0 |
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Return a visual representation of the node and all following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Create a Linked List from the elements of the given sequence
    and return the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set first element as head, then append the rest in order.
    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 129 |
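The reverse printing above works by recursing to the tail before printing anything. A de-obfuscated sketch of that idea with plain names:

class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

def print_reverse(node):
    # Recurse all the way to the tail first, then print each value
    # on the way back up the call stack.
    if node is not None:
        print_reverse(node.next)
        print(node.data)

head = Node(14)
head.next = Node(52)
head.next.next = Node(43)
print_reverse(head)  # prints 43, 52, 14 -- tail to head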
# Function to print upper half of diamond (pyramid)
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int]):
'''simple docstring'''
for i in range(0 ,lowerCamelCase_):
for _ in range(0 ,n - i - 1): # printing spaces
print(''' ''' ,end='''''')
for _ in range(0 ,i + 1): # printing stars
print('''* ''' ,end='''''')
print()
def lowerCAmelCase__ ( lowerCamelCase_ : str):
'''simple docstring'''
for i in range(lowerCamelCase_ ,0 ,-1):
for _ in range(lowerCamelCase_ ,0 ,-1): # printing stars
print('''* ''' ,end='''''')
print()
for _ in range(n - i + 1 ,0 ,-1): # printing spaces
print(''' ''' ,end='''''')
def lowerCAmelCase__ ( lowerCamelCase_ : Tuple):
'''simple docstring'''
if n <= 0:
print(''' ... .... nothing printing :(''')
return
floyd(lowerCamelCase_) # upper half
reverse_floyd(lowerCamelCase_) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
__snake_case : int =1
while K:
        __snake_case : Optional[int] =int(input('enter the number and see the magic : '))
print()
pretty_print(user_number)
__snake_case : str =int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 129 | 1 |
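Together the two loops above print a diamond of stars, n rows on the way up and n rows on the way back down. An equivalent compact sketch using str.center:

def diamond(n: int) -> str:
    # Each star occupies two characters ("* "), so a full row is 2*n wide.
    width = 2 * n
    rows = ["* " * (i + 1) for i in range(n)]    # upper half
    rows += ["* " * i for i in range(n, 0, -1)]  # lower half
    return "\n".join(row.rstrip().center(width).rstrip() for row in rows)

print(diamond(3))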
'''simple docstring'''
def lowercase__( __UpperCamelCase: str ):
"""simple docstring"""
return "".join(chr(ord(__UpperCamelCase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 246 |
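The case conversion above leans on lowercase and uppercase ASCII letters sitting exactly 32 code points apart. Spelled out as a small sketch:

assert ord("a") - ord("A") == 32

def to_upper(word: str) -> str:
    # Shift each lowercase ASCII letter down 32 code points; leave
    # everything else (digits, punctuation, uppercase) untouched.
    return "".join(
        chr(ord(ch) - 32) if "a" <= ch <= "z" else ch for ch in word
    )

print(to_upper("wow hello"))  # WOW HELLO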
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=2, A=56, A=True, A=True, A=True, A=True, A=99, A=32, A=2, A=2, A=7, A="gelu_new", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=4, A="block_sparse", A=True, A=False, A=2, A=3, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_attention_mask
SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : int = rescale_embeddings
SCREAMING_SNAKE_CASE : Any = attention_type
SCREAMING_SNAKE_CASE : str = use_bias
SCREAMING_SNAKE_CASE : Tuple = block_size
SCREAMING_SNAKE_CASE : List[Any] = num_random_blocks
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = BigBirdConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
A : List[Any] = False
A : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(A, A )
SCREAMING_SNAKE_CASE : List[str] = model_class(A )
@jax.jit
def model_jitted(A, A=None, **A ):
return model(input_ids=A, attention_mask=A, **A )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE : List[str] = model_jitted(**A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Optional[Any] = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ), len(A ) )
for jitted_output, output in zip(A, A ):
self.assertEqual(jitted_output.shape, output.shape )
def UpperCamelCase_ ( self, A, A, A, A=1E-5, A="outputs", A=None ):
'''simple docstring'''
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(A, A, A, A, A, A )
| 246 | 1 |
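The JIT-enabled/JIT-disabled comparison in the test above is a reusable JAX pattern: run the same function once compiled and once eagerly, then check the outputs agree. A minimal sketch, assuming jax is installed:

import jax
import jax.numpy as jnp

@jax.jit
def scaled_sum(x, scale=2.0):
    return (x * scale).sum(axis=-1)

x = jnp.ones((4, 8))
jitted = scaled_sum(x)
with jax.disable_jit():   # same call, but traced and run eagerly
    eager = scaled_sum(x)
assert jitted.shape == eager.shape
assert jnp.allclose(jitted, eager)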
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__snake_case :Any = logging.get_logger(__name__)
__snake_case :Union[str, Any] = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__snake_case :Optional[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
__snake_case :List[str] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class _A ( A__ ):
UpperCamelCase__ : Tuple = '''whisper'''
UpperCamelCase__ : Optional[int] = ['''past_key_values''']
UpperCamelCase__ : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple=51_865 , __SCREAMING_SNAKE_CASE : Union[str, Any]=80 , __SCREAMING_SNAKE_CASE : str=6 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=6 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=1_536 , __SCREAMING_SNAKE_CASE : Tuple=1_536 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=50_257 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=256 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Any=1_500 , __SCREAMING_SNAKE_CASE : List[Any]=448 , __SCREAMING_SNAKE_CASE : Any=50_256 , __SCREAMING_SNAKE_CASE : List[Any]=50_256 , __SCREAMING_SNAKE_CASE : Tuple=50_256 , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : str=[220, 50_256] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Dict=256 , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=0.05 , __SCREAMING_SNAKE_CASE : int=10 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=10 , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , **__SCREAMING_SNAKE_CASE : Optional[int] , ):
'''simple docstring'''
__a = vocab_size
__a = num_mel_bins
__a = d_model
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_layers
__a = decoder_attention_heads
__a = decoder_ffn_dim
__a = encoder_ffn_dim
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
__a = max_source_positions
__a = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__a = classifier_proj_size
__a = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = apply_spec_augment
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
__a = median_filter_width
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , suppress_tokens=__snake_case , begin_suppress_tokens=__snake_case , **__snake_case , )
class _A ( A__ ):
@property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
])
if self.use_past:
__a = {0: '''batch'''}
else:
__a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction='''inputs''')
return common_inputs
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , __SCREAMING_SNAKE_CASE : int = 22_050 , __SCREAMING_SNAKE_CASE : float = 5.0 , __SCREAMING_SNAKE_CASE : int = 220 , ):
'''simple docstring'''
__a = OrderedDict()
__a = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__snake_case , framework=__snake_case , sampling_rate=__snake_case , time_duration=__snake_case , frequency=__snake_case , )
__a = encoder_inputs['''input_features'''].shape[2]
__a = encoder_sequence_length // 2 if self.use_past else seq_length
__a = super().generate_dummy_inputs(
preprocessor.tokenizer , __snake_case , __snake_case , __snake_case , __snake_case)
__a = encoder_inputs.pop('''input_features''')
__a = decoder_inputs.pop('''decoder_input_ids''')
if "past_key_values" in decoder_inputs:
__a = decoder_inputs.pop('''past_key_values''')
return dummy_inputs
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return 1E-3
| 49 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any]=1000 ) -> int:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCAmelCase : str = n - 1
UpperCAmelCase : List[Any] = 0
while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1
    # n - 1 = d * (2**exp)
UpperCAmelCase : List[str] = 0
while count < prec:
UpperCAmelCase : int = random.randint(2 , n - 1 )
UpperCAmelCase : List[str] = bin_exp_mod(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if b != 1:
UpperCAmelCase : int = True
for _ in range(_lowerCAmelCase ):
if b == n - 1:
UpperCAmelCase : Dict = False
break
UpperCAmelCase : str = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
UpperCamelCase__: Optional[int] = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 23 | 0 |
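A self-contained sketch of the same Miller-Rabin test, using Python's built-in three-argument pow in place of the external bin_exp_mod helper:

import random

def is_probably_prime(n: int, rounds: int = 40) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # Write n - 1 as d * 2**s with d odd.
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for _ in range(rounds):
        a = random.randint(2, n - 1)
        x = pow(a, d, n)  # three-argument pow is modular exponentiation
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False  # a witnesses that n is composite
    return True

print([p for p in range(2, 40) if is_probably_prime(p)])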
def _lowercase ( _UpperCAmelCase ) -> int:
lowerCamelCase =[[0 for _ in range(_UpperCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCamelCase =1
for n in range(m + 1 ):
for k in range(1 , _UpperCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase__ : int =int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase__ : int =int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 262 |
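The memo table above counts integer partitions. A cleaner memoised sketch of the same count, with the known values p(4) = 5 and p(5) = 7 as a sanity check:

from functools import lru_cache

@lru_cache(maxsize=None)
def partitions(n, max_part=None):
    # Number of ways to write n as a sum of positive integers,
    # each at most max_part, with order ignored.
    if max_part is None:
        max_part = n
    if n == 0:
        return 1
    return sum(partitions(n - k, k) for k in range(1, min(n, max_part) + 1))

assert partitions(4) == 5
assert partitions(5) == 7
print([partitions(n) for n in range(1, 8)])  # [1, 2, 3, 5, 7, 11, 15]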
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowercase ( _UpperCAmelCase = "isbn/0140328726" ) -> dict:
lowerCamelCase =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowerCamelCase =F"""{olid} is not a valid Open Library olid"""
raise ValueError(_UpperCAmelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _lowercase ( _UpperCAmelCase ) -> dict:
lowerCamelCase ={
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowerCamelCase ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCamelCase =[
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowerCamelCase =data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =""", """.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase__ : List[str] =input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
UpperCAmelCase__ : Dict =summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 262 | 1 |
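The lookup above goes through the public Open Library JSON API. A minimal direct request against the same endpoint, assuming network access and the requests package (0140328726 is the Matilda ISBN used as the function's default argument):

import requests

# One GET per olid; the .json suffix selects the JSON representation.
resp = requests.get("https://openlibrary.org/isbn/0140328726.json", timeout=10)
resp.raise_for_status()
book = resp.json()
print(book.get("title"))         # expected: "Matilda"
print(book.get("publish_date"))  # a date string, e.g. "October 1, 1988"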
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'''
def _lowercase ( self : List[str] ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=(4, 4, 6_4, 6_4) , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ )
return image
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Any="CompVis/stable-diffusion-v1-4" ) -> str:
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = '''bf16''' if fpaa else None
lowercase_ , lowercase_ = FlaxUNetaDConditionModel.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='''unet''' , dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ )
return model, params
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Dict=(4, 7_7, 7_6_8) , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Dict:
lowercase_ = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> int:
lowercase_ , lowercase_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_latents(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = model.apply(
{'''params''': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
assert sample.shape == latents.shape
lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> Dict:
lowercase_ , lowercase_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_latents(SCREAMING_SNAKE_CASE_ , shape=(4, 4, 9_6, 9_6) , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , shape=(4, 7_7, 1_0_2_4) , fpaa=SCREAMING_SNAKE_CASE_ )
lowercase_ = model.apply(
{'''params''': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
assert sample.shape == latents.shape
lowercase_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
| 30 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : str = LayoutLMTokenizer
UpperCAmelCase : int = LayoutLMTokenizerFast
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Optional[Any] = True
def __snake_case ( self : Optional[int]):
super().setUp()
a : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
a : Tuple = "UNwant\u00E9d,running"
a : Dict = "unwanted, running"
return input_text, output_text
def __snake_case ( self : Any):
a : List[Any] = self.tokenizer_class(self.vocab_file)
a : str = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9])
def __snake_case ( self : Dict):
pass
| 40 | 0 |
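The expected split un / ##want / ##ed / , / runn / ##ing with ids [7, 4, 5, 10, 8, 9] follows from greedy longest-match WordPiece over the tiny vocabulary above. A dependency-free sketch of that matching loop:

VOCAB = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed",
         "wa", "un", "runn", "##ing", ",", "low", "lowest"]
IDS = {tok: i for i, tok in enumerate(VOCAB)}

def wordpiece(word):
    # Greedy longest-prefix matching; continuation pieces carry "##".
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in IDS:
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]  # no prefix matched at all
        start = end
    return pieces

tokens = [p for w in ["unwanted", ",", "running"] for p in wordpiece(w)]
print(tokens)                    # ['un', '##want', '##ed', ',', 'runn', '##ing']
print([IDS[t] for t in tokens])  # [7, 4, 5, 10, 8, 9]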
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_snake_case : Dict = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_snake_case : Union[str, Any] = {
"Salesforce/codegen-350M-mono": 2_048,
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : int = ["input_ids", "attention_mask"]
__UpperCAmelCase : Tuple = CodeGenTokenizer
def __init__( self : List[str] , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[str]=None , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : Optional[Any]="<|endoftext|>" , lowerCamelCase : Dict="<|endoftext|>" , lowerCamelCase : Any="<|endoftext|>" , lowerCamelCase : Optional[Any]=False , **lowerCamelCase : int , ) -> Dict:
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
if kwargs.pop("add_bos_token" , lowerCamelCase ):
__snake_case : List[str] = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__snake_case : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__snake_case : List[str] = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__snake_case : Tuple = add_prefix_space
__snake_case : int = pre_tok_class(**lowerCamelCase )
__snake_case : Optional[int] = add_prefix_space
def __snake_case ( self : int , *lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> BatchEncoding:
__snake_case : Optional[Any] = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[int] , *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any] ) -> BatchEncoding:
__snake_case : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
__snake_case : List[Any] = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def __snake_case ( self : Dict , lowerCamelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , lowerCamelCase : bool = False , lowerCamelCase : bool = None , lowerCamelCase : Optional[List[str]] = None , **lowerCamelCase : Tuple , ) -> str:
__snake_case : Any = super().decode(
token_ids=lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
if truncate_before_pattern is not None and len(lowerCamelCase ) > 0:
__snake_case : Union[str, Any] = self.truncate(lowerCamelCase , lowerCamelCase )
return decoded_text
def __snake_case ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : List[str] ) -> Optional[Any]:
def find_re(lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ):
__snake_case : Optional[int] = pattern.search(lowerCamelCase , lowerCamelCase )
return m.start() if m else -1
__snake_case : Dict = [re.compile(lowerCamelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
__snake_case : Optional[Any] = list(re.finditer("^print" , lowerCamelCase , re.MULTILINE ) )
if len(lowerCamelCase ) > 1:
__snake_case : Optional[int] = completion[: prints[1].start()]
__snake_case : List[str] = list(re.finditer("^def" , lowerCamelCase , re.MULTILINE ) )
if len(lowerCamelCase ) > 1:
__snake_case : Optional[Any] = completion[: defs[1].start()]
__snake_case : Dict = 0
__snake_case : Optional[Any] = [
pos for pos in [find_re(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for terminal in terminals] if pos != -1
]
if len(lowerCamelCase ) > 0:
return completion[: min(lowerCamelCase )]
else:
return completion
| 370 |
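The truncate method above cuts a completion at the earliest match of any stop pattern. The core of that logic, isolated as a sketch:

import re

def truncate(completion, stop_patterns):
    # Cut at the earliest match of any pattern; ^ anchors work per line
    # because of re.MULTILINE.
    positions = [
        m.start()
        for pattern in stop_patterns
        for m in [re.compile(pattern, re.MULTILINE).search(completion)]
        if m is not None
    ]
    return completion[: min(positions)] if positions else completion

code = "def f(x):\n    return x\n\nprint(f(2))\ndef g():\n    pass\n"
print(truncate(code, [r"^print", r"^def g"]))  # keeps only the body of f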
from scipy.stats import spearmanr
import datasets
_snake_case : Optional[Any] = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_snake_case : Dict = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
_snake_case : Optional[Any] = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a (datasets.Metric ):
"""simple docstring"""
def __snake_case ( self : Optional[int] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Tuple=False ) -> Dict:
__snake_case : Optional[int] = spearmanr(lowerCamelCase , lowerCamelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 134 | 0 |
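Since the metric wraps scipy directly, the example numbers in its docstring fall out of a plain scipy call:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))     # -0.7, as in the docstring example
print(round(pvalue, 2))  # 0.19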
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
UpperCamelCase = '''
import os
'''
UpperCamelCase = '''
def foo():
import os
return False
'''
UpperCamelCase = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
UpperCamelCase = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
UpperCamelCase = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
UpperCamelCase = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
UpperCamelCase = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
UpperCamelCase = '''
import os
try:
import bar
except:
raise ValueError()
'''
UpperCamelCase = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
UpperCamelCase = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
UpperCamelCase = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , __lowercase )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict:
A: Tuple = os.path.join(__lowercase , '''test_file.py''' )
with open(__lowercase , '''w''' ) as _tmp_file:
_tmp_file.write(__lowercase )
A: List[Any] = get_imports(__lowercase )
assert parsed_imports == ["os"]
| 319 |
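The assertions above expect imports guarded by try/except to be ignored. A simplified ast-based stand-in (not the transformers implementation) that keeps only module-level, unguarded imports:

import ast

def top_level_imports(source):
    # Walk only the module body, so imports nested in functions or
    # inside try/except blocks are never visited.
    modules = []
    for node in ast.parse(source).body:
        if isinstance(node, ast.Import):
            modules.extend(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module:
            modules.append(node.module.split(".")[0])
    return sorted(set(modules))

src = "import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
print(top_level_imports(src))  # ['os'] -- the guarded bar import is skipped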
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
from transformers.testing_utils import pytest_terminal_summary_main
A: Optional[int] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__lowercase , id=__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any:
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
A: Tuple = 0
# Doctest custom flag to ignore output.
UpperCamelCase = doctest.register_optionflag('''IGNORE_RESULT''')
UpperCamelCase = doctest.OutputChecker
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = CustomOutputChecker
UpperCamelCase = HfDoctestModule
UpperCamelCase = HfDocTestParser
| 319 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
snake_case : List[Any] = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
snake_case : Optional[Any] = F"https://www.google.com/search?q={query}&num=100"
snake_case : Any = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
snake_case : Tuple = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
snake_case : Union[str, Any] = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)['''url'''][0]
webbrowser.open(link)
| 367 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : List[Any] = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'roformer'
def __init__( self , _a=50_000 , _a=None , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_536 , _a=2 , _a=0.02 , _a=1e-12 , _a=0 , _a=False , _a=True , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__magic_name__ : Tuple = vocab_size
__magic_name__ : Dict = hidden_size if embedding_size is None else embedding_size
__magic_name__ : int = hidden_size
__magic_name__ : int = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : Union[str, Any] = hidden_act
__magic_name__ : Optional[int] = intermediate_size
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : str = type_vocab_size
__magic_name__ : Dict = initializer_range
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : Optional[int] = rotary_value
__magic_name__ : List[Any] = use_cache
class _snake_case ( snake_case ):
@property
def SCREAMING_SNAKE_CASE ( self ):
if self.task == "multiple-choice":
__magic_name__ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
__magic_name__ : str = {0: "batch", 1: "sequence"}
__magic_name__ : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 41 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
snake_case : Optional[int] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
snake_case : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 94 |
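The TYPE_CHECKING / _LazyModule split above defers heavy imports until first attribute access. The underlying mechanism is module-level __getattr__ (PEP 562); a toy sketch for a hypothetical package __init__.py (names are illustrative):

# Toy __init__.py for a hypothetical package.
import importlib

_ATTR_TO_SUBMODULE = {
    "TrOCRConfig": ".configuration_trocr",  # attribute -> submodule
}

def __getattr__(name):
    # Invoked only when `name` is not already defined on the module,
    # so the submodule is imported lazily on first access.
    if name in _ATTR_TO_SUBMODULE:
        submodule = importlib.import_module(_ATTR_TO_SUBMODULE[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")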
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowerCAmelCase ( __a ):
_lowercase ='''megatron-bert'''
def __init__( self , _UpperCamelCase=29_056 , _UpperCamelCase=1_024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4_096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-1_2 , _UpperCamelCase=0 , _UpperCamelCase="absolute" , _UpperCamelCase=True , **_UpperCamelCase , ) -> int:
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = position_embedding_type
lowerCAmelCase_ = use_cache
| 231 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
lowerCamelCase = True
from torch.cuda.amp import autocast
lowerCamelCase = logging.getLogger(__name__)
@dataclass
class _a :
_a : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
_a : Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_a : Optional[bool] = field(
default=_lowercase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''})
_a : Optional[bool] = field(
default=_lowercase , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
_a : Optional[float] = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''})
_a : Optional[float] = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''})
_a : Optional[float] = field(
default=0.99_9995 , metadata={'''help''': '''Decay of gumbel temperature during training.'''})
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase__ : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
lowerCAmelCase__ : List[Any] = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
lowerCAmelCase__ : Tuple = logging.INFO
logger.setLevel(_a )
@dataclass
class _a :
_a : str = field(
default=_lowercase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''})
_a : Optional[str] = field(
default=_lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''})
_a : Optional[str] = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
_a : Optional[str] = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
_a : Optional[str] = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
_a : bool = field(
default=_lowercase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''})
_a : Optional[int] = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
_a : Optional[int] = field(
default=_lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_a : Optional[float] = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''})
@dataclass
class _a :
_a : WavaVecaForPreTraining
_a : WavaVecaFeatureExtractor
_a : Union[bool, str] = "longest"
_a : Optional[int] = None
_a : Optional[int] = None
def __call__( self : List[Any] , _SCREAMING_SNAKE_CASE : List[Dict[str, Union[List[int], torch.Tensor]]] )-> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
lowerCAmelCase__ : Union[str, Any] = self.feature_extractor.pad(
_SCREAMING_SNAKE_CASE , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
lowerCAmelCase__ : List[str] = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
lowerCAmelCase__ : Union[str, Any] = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCAmelCase__ : Optional[int] = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
lowerCAmelCase__ : Any = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : Tuple = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowerCAmelCase__ : str = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_SCREAMING_SNAKE_CASE , min_masks=2 , )
return batch
class _a ( _lowercase):
def __init__( self : Any , *_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=1 , _SCREAMING_SNAKE_CASE : Optional[int]=0 , _SCREAMING_SNAKE_CASE : Optional[int]=1.0 , **_SCREAMING_SNAKE_CASE : Optional[int] )-> Any:
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : List[Any] = max_gumbel_temp
lowerCAmelCase__ : str = min_gumbel_temp
lowerCAmelCase__ : Optional[int] = gumbel_temp_decay
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : nn.Module , _SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] )-> torch.Tensor:
model.train()
lowerCAmelCase__ : Tuple = self._prepare_inputs(_SCREAMING_SNAKE_CASE )
if self.use_amp:
with autocast():
lowerCAmelCase__ : Union[str, Any] = self.compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase__ : Tuple = self.compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCAmelCase__ : Tuple = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCAmelCase__ : Tuple = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
lowerCAmelCase__ : Optional[Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_SCREAMING_SNAKE_CASE ).backward()
elif self.use_apex:
with amp.scale_loss(_SCREAMING_SNAKE_CASE , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_SCREAMING_SNAKE_CASE )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ : Any = parser.parse_args_into_dataclasses()
configure_logger(_a , _a )
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ : Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowerCAmelCase__ : int = DatasetDict()
lowerCAmelCase__ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
lowerCAmelCase__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowerCAmelCase__ : Tuple = DatasetDict()
lowerCAmelCase__ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
lowerCAmelCase__ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCAmelCase__ : Tuple = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_a )
def prepare_dataset(_a ):
# check that all files have the correct sampling rate
lowerCAmelCase__ : Dict = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowerCAmelCase__ : Any = datasets.map(
_a , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
lowerCAmelCase__ : str = vectorized_datasets.filter(
lambda _a : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_a ):
return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowerCAmelCase__ : List[Any] = vectorized_datasets.map(
_a , batched=_a , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCAmelCase__ : Dict = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
lowerCAmelCase__ : Dict = WavaVecaForPreTraining(_a )
lowerCAmelCase__ : Optional[int] = DataCollatorForWavaVecaPretraining(model=_a , feature_extractor=_a )
lowerCAmelCase__ : Tuple = WavaVecaPreTrainer(
model=_a , data_collator=_a , args=_a , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=_a , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 354 |
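The trainer above anneals the Gumbel-softmax temperature multiplicatively on every update, clamped at a floor. The schedule in isolation, using the script's default hyperparameters (max 2.0, min 0.5, decay 0.999995):

def gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    # Exponential decay from max_temp, never dropping below min_temp.
    return max(max_temp * decay**step, min_temp)

for step in (0, 100_000, 500_000, 1_000_000):
    print(step, round(gumbel_temperature(step), 4))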
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _a ( _lowercase):
_a : Dict = '''convbert'''
def __init__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : str=3_0522 , _SCREAMING_SNAKE_CASE : Union[str, Any]=768 , _SCREAMING_SNAKE_CASE : Tuple=12 , _SCREAMING_SNAKE_CASE : str=12 , _SCREAMING_SNAKE_CASE : Any=3072 , _SCREAMING_SNAKE_CASE : List[Any]="gelu" , _SCREAMING_SNAKE_CASE : Dict=0.1 , _SCREAMING_SNAKE_CASE : List[str]=0.1 , _SCREAMING_SNAKE_CASE : int=512 , _SCREAMING_SNAKE_CASE : Any=2 , _SCREAMING_SNAKE_CASE : Optional[int]=0.02 , _SCREAMING_SNAKE_CASE : Optional[int]=1E-12 , _SCREAMING_SNAKE_CASE : Union[str, Any]=1 , _SCREAMING_SNAKE_CASE : Optional[int]=0 , _SCREAMING_SNAKE_CASE : Dict=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=768 , _SCREAMING_SNAKE_CASE : Optional[Any]=2 , _SCREAMING_SNAKE_CASE : Tuple=9 , _SCREAMING_SNAKE_CASE : int=1 , _SCREAMING_SNAKE_CASE : Tuple=None , **_SCREAMING_SNAKE_CASE : List[str] , )-> List[str]:
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : List[Any] = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Dict = hidden_dropout_prob
lowerCAmelCase__ : str = attention_probs_dropout_prob
lowerCAmelCase__ : str = max_position_embeddings
lowerCAmelCase__ : Tuple = type_vocab_size
lowerCAmelCase__ : Optional[Any] = initializer_range
lowerCAmelCase__ : List[Any] = layer_norm_eps
lowerCAmelCase__ : int = embedding_size
lowerCAmelCase__ : Union[str, Any] = head_ratio
lowerCAmelCase__ : Optional[int] = conv_kernel_size
lowerCAmelCase__ : List[str] = num_groups
lowerCAmelCase__ : Dict = classifier_dropout
class _a ( _lowercase):
@property
def UpperCAmelCase__( self : Tuple )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase__ : List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 211 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : List[Any] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowercase__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 224 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_lowercase : Tuple = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = ['pixel_values']
def __init__( self : Optional[Any], lowerCamelCase : bool = True, lowerCamelCase : Union[int, float] = 1 / 255, lowerCamelCase : bool = True, lowerCamelCase : int = 8, **lowerCamelCase : Tuple, )-> None:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : int =do_rescale
lowerCamelCase__ : Dict =rescale_factor
lowerCamelCase__ : Union[str, Any] =do_pad
lowerCamelCase__ : Union[str, Any] =pad_size
def snake_case ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : float, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : int )-> np.ndarray:
return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase )
def snake_case ( self : Optional[Any], lowerCamelCase : np.ndarray, lowerCamelCase : int, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_image_size(lowerCamelCase )
lowerCamelCase__ : List[str] =(old_height // size + 1) * size - old_height
lowerCamelCase__ : List[str] =(old_width // size + 1) * size - old_width
return pad(lowerCamelCase, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : ImageInput, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[float] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST, **lowerCamelCase : Union[str, Any], )-> Dict:
lowerCamelCase__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : str =do_pad if do_pad is not None else self.do_pad
lowerCamelCase__ : int =pad_size if pad_size is not None else self.pad_size
lowerCamelCase__ : Optional[int] =make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Tuple =[to_numpy_array(lowerCamelCase ) for image in images]
if do_rescale:
lowerCamelCase__ : Tuple =[self.rescale(image=lowerCamelCase, scale=lowerCamelCase ) for image in images]
if do_pad:
lowerCamelCase__ : Tuple =[self.pad(lowerCamelCase, size=lowerCamelCase ) for image in images]
lowerCamelCase__ : int =[to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images]
lowerCamelCase__ : Dict ={'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
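# A worked check of the pad-to-multiple arithmetic used in `pad` above, with an
# assumed pad size of 8 and a hypothetical 250x250 input:
size = 8
old_height, old_width = 250, 250
pad_height = (old_height // size + 1) * size - old_height  # (31 + 1) * 8 - 250 = 6
pad_width = (old_width // size + 1) * size - old_width  # 6
assert (old_height + pad_height) % size == 0
# Note that an input already divisible by `size` still gains a full extra block,
# e.g. old_height = 256 yields pad_height = 8.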
| 238 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_SCREAMING_SNAKE_CASE = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here, so that Accelerate becomes a hard
# dependency of Transformers when PyTorch is installed
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
require_version(deps[pkg] , __a )
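# Hypothetical direct use of the version-check helper wrapped above; the requirement
# string and hint are illustrative assumptions, not entries from the dependency table.
from transformers.utils.versions import require_version

require_version("numpy>=1.17", "To fix: pip install 'numpy>=1.17'")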
| 369 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
snake_case_ : Optional[int] = None
if token is not None:
snake_case_ : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
snake_case_ : Union[str, Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
snake_case_ : Optional[int] = requests.get(__a , headers=__a ).json()
snake_case_ : List[str] = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
snake_case_ : Dict = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(__a ):
snake_case_ : Optional[Any] = requests.get(url + f"""&page={i + 2}""" , headers=__a ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
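# A quick check of the pagination arithmetic above (counts are assumed): the first
# request returns up to 100 jobs, then ceil((total_count - 100) / 100) more pages follow.
import math

total_count = 250  # hypothetical: 250 jobs at 100 per page
assert math.ceil((total_count - 100) / 100) == 2  # i.e. extra requests for page=2 and page=3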
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
snake_case_ : Union[str, Any] = None
if token is not None:
snake_case_ : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
snake_case_ : Optional[Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
snake_case_ : Union[str, Any] = requests.get(__a , headers=__a ).json()
snake_case_ : Any = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
snake_case_ : str = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(__a ):
snake_case_ : int = requests.get(url + f"""&page={i + 2}""" , headers=__a ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a ):
snake_case_ : Dict = None
if token is not None:
snake_case_ : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
snake_case_ : Optional[int] = requests.get(__a , headers=__a , allow_redirects=__a )
snake_case_ : str = result.headers['Location']
snake_case_ : List[str] = requests.get(__a , allow_redirects=__a )
snake_case_ : Optional[Any] = os.path.join(__a , f"""{artifact_name}.zip""" )
with open(__a , 'wb' ) as fp:
fp.write(response.content )
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
snake_case_ : Any = []
snake_case_ : Any = []
snake_case_ : Tuple = None
with zipfile.ZipFile(__a ) as z:
for filename in z.namelist():
if not os.path.isdir(__a ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__a ) as f:
for line in f:
snake_case_ : Tuple = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case_ : Tuple = line[: line.index(': ' )]
snake_case_ : Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
snake_case_ : Any = line[len('FAILED ' ) :]
failed_tests.append(__a )
elif filename == "job_name.txt":
snake_case_ : Union[str, Any] = line
if len(__a ) != len(__a ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(__a )} for `errors` """
f"""and {len(__a )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
' problem.' )
snake_case_ : List[str] = None
if job_name and job_links:
snake_case_ : Union[str, Any] = job_links.get(__a , __a )
# A list with elements of the form (line of error, error, failed test)
snake_case_ : Optional[Any] = [x + [y] + [job_link] for x, y in zip(__a , __a )]
return result
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
snake_case_ : Any = []
snake_case_ : Any = [os.path.join(__a , __a ) for p in os.listdir(__a ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__a , job_links=__a ) )
return errors
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
snake_case_ : Optional[int] = Counter()
counter.update([x[1] for x in logs] )
snake_case_ : str = counter.most_common()
snake_case_ : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case_ : int = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case_ : int = dict(sorted(r.items() , key=lambda __a : item[1]["count"] , reverse=__a ) )
return r
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Tuple = test.split('::' )[0]
if test.startswith('tests/models/' ):
snake_case_ : List[str] = test.split('/' )[2]
else:
snake_case_ : Union[str, Any] = None
return test
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
snake_case_ : Optional[int] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case_ : str = [x for x in logs if x[2] is not None]
snake_case_ : int = {x[2] for x in logs}
snake_case_ : Dict = {}
for test in tests:
snake_case_ : List[str] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case_ : Any = counter.most_common()
snake_case_ : str = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case_ : Tuple = sum(error_counts.values() )
if n_errors > 0:
snake_case_ : List[Any] = {'count': n_errors, 'errors': error_counts}
snake_case_ : int = dict(sorted(r.items() , key=lambda __a : item[1]["count"] , reverse=__a ) )
return r
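# A toy illustration of the reductions above, with made-up log entries of the form
# [error_line, error, test]; the grouping itself is just a Counter over the error column.
toy_logs = [
    ["mod.py:1", "ValueError", "tests/models/bert/test_modeling_bert.py::test_fwd"],
    ["mod.py:2", "ValueError", "tests/models/gpt2/test_modeling_gpt2.py::test_fwd"],
    ["mod.py:3", "KeyError", "tests/models/bert/test_modeling_bert.py::test_bwd"],
]
toy_counter = Counter(x[1] for x in toy_logs)
assert toy_counter.most_common() == [("ValueError", 2), ("KeyError", 1)]
# Grouped per model instead, bert would count 2 errors and gpt2 would count 1.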
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Optional[Any] = '| no. | error | status |'
snake_case_ : str = '|-:|:-|:-|'
snake_case_ : Tuple = [header, sep]
for error in reduced_by_error:
snake_case_ : Dict = reduced_by_error[error]['count']
snake_case_ : List[str] = f"""| {count} | {error[:1_00]} | |"""
lines.append(__a )
return "\n".join(__a )
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Optional[Any] = '| model | no. of errors | major error | count |'
snake_case_ : Union[str, Any] = '|-:|-:|-:|-:|'
snake_case_ : Optional[int] = [header, sep]
for model in reduced_by_model:
snake_case_ : Any = reduced_by_model[model]['count']
snake_case_ ,snake_case_ : Dict = list(reduced_by_model[model]['errors'].items() )[0]
snake_case_ : Any = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(__a )
return "\n".join(__a )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(""" / """)
_SCREAMING_SNAKE_CASE = k[index + len(""" / """) :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
| 88 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """realm"""
def __init__( self , lowercase_=3_0522 , lowercase_=768 , lowercase_=128 , lowercase_=12 , lowercase_=12 , lowercase_=8 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=256 , lowercase_=10 , lowercase_=1E-3 , lowercase_=5 , lowercase_=320 , lowercase_=1335_3718 , lowercase_=5000 , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
# Common config
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : Dict = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : List[str] = retriever_proj_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = num_candidates
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : List[str] = layer_norm_eps
# Reader config
UpperCAmelCase_ : int = span_hidden_size
UpperCAmelCase_ : Optional[Any] = max_span_width
UpperCAmelCase_ : Dict = reader_layer_norm_eps
UpperCAmelCase_ : Optional[int] = reader_beam_size
UpperCAmelCase_ : Union[str, Any] = reader_seq_len
# Retrieval config
UpperCAmelCase_ : Union[str, Any] = num_block_records
UpperCAmelCase_ : Optional[Any] = searcher_beam_size
| 61 |
'''simple docstring'''
from typing import List
import numpy as np
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase ={key: len(_lowerCAmelCase ) for key, value in gen_kwargs.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
__lowercase =max(lists_lengths.values() , default=0 )
return max(1 , _lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
for group_idx in range(_lowerCAmelCase ):
__lowercase =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__lowercase =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__lowercase =range(_lowerCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(_lowerCAmelCase )
return shards_indices_per_group
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =_number_of_shards_in_gen_kwargs(_lowerCAmelCase )
if num_shards == 1:
return [dict(_lowerCAmelCase )]
else:
__lowercase =_distribute_shards(num_shards=_lowerCAmelCase , max_num_jobs=_lowerCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(_lowerCAmelCase ) )
]
def _A ( _lowerCAmelCase ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , _lowerCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase ={len(_lowerCAmelCase ) for value in gen_kwargs.values() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
__lowercase ={}
for size in list_sizes:
__lowercase =list(range(_lowerCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowercase =dict(_lowerCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase =[value[i] for i in indices_per_size[len(_lowerCAmelCase )]]
return shuffled_kwargs
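# A worked example of the shard-distribution logic above, reimplemented standalone for
# illustration (the defs in this snippet are style-mangled): the first
# num_shards % max_num_jobs groups each receive one extra shard.
def distribute_shards_example(num_shards: int, max_num_jobs: int) -> list:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        num_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_to_add == 0:
            break
        groups.append(range(start, start + num_to_add))
        start += num_to_add
    return groups

assert distribute_shards_example(5, 2) == [range(0, 3), range(3, 5)]
assert distribute_shards_example(3, 5) == [range(0, 1), range(1, 2), range(2, 3)]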
| 166 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = PhobertTokenizer
lowercase__ = False
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
__UpperCamelCase = dict(zip(__a , range(len(__a))))
__UpperCamelCase = ['''#version: 0.2''', '''l à</w>''']
__UpperCamelCase = {'''unk_token''': '''<unk>'''}
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__a))
def UpperCAmelCase ( self , **__a) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__a)
def UpperCAmelCase ( self , __a) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = '''Tôi là VinAI Research'''
__UpperCamelCase = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
__UpperCamelCase = '''Tôi là VinAI Research'''
__UpperCamelCase = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
__UpperCamelCase = tokenizer.tokenize(__a)
print(__a)
self.assertListEqual(__a , __a)
__UpperCamelCase = tokens + [tokenizer.unk_token]
__UpperCamelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , __a)
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'biogpt'
def __init__( self , __a=4_23_84 , __a=10_24 , __a=24 , __a=16 , __a=40_96 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10_24 , __a=0.02 , __a=1e-12 , __a=True , __a=True , __a=0.0 , __a=0.0 , __a=1 , __a=0 , __a=2 , **__a , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_cache
_UpperCamelCase = layerdrop
_UpperCamelCase = activation_dropout
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a)
| 100 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : List[Any] = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
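# A minimal, standalone sketch of the lazy-import idea behind `_LazyModule` above
# (a simplified assumption, not the library's actual implementation): submodules are
# imported only when one of their attributes is first accessed.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)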
| 99 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
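# A minimal, standalone sketch (an assumption, not the library's actual code) of the
# dummy-object pattern repeated above: every use of a placeholder class raises an
# error naming the backend that is missing.
class RequiresFlaxExample:
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the {self._backends} backend(s).")

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the {cls._backends} backend(s).")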
| 283 | 0 |
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ = 1_0_0_0 )-> int:
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 350 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :int = KandinskyVaaImgaImgPipeline
UpperCamelCase_ :Union[str, Any] = ["""image_embeds""", """negative_image_embeds""", """image"""]
UpperCamelCase_ :Dict = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
UpperCamelCase_ :Tuple = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase_ :int = False
@property
def UpperCAmelCase_ ( self )-> List[str]:
return 32
@property
def UpperCAmelCase_ ( self )-> List[Any]:
return 32
@property
def UpperCAmelCase_ ( self )-> Tuple:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self )-> Any:
return 100
@property
def UpperCAmelCase_ ( self )-> Tuple:
torch.manual_seed(0 )
UpperCamelCase_ = {
"in_channels": 4,
# out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase_ = UNetaDConditionModel(**_lowercase )
return model
@property
def UpperCAmelCase_ ( self )-> List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self )-> Any:
torch.manual_seed(0 )
UpperCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.dummy_unet
UpperCamelCase_ = self.dummy_movq
UpperCamelCase_ = {
"num_train_timesteps": 1_000,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCamelCase_ = DDIMScheduler(**_lowercase )
UpperCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self , _lowercase , _lowercase=0 )-> Tuple:
UpperCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase )
UpperCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowercase )
# create init_image
UpperCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase_ = Image.fromarray(np.uinta(_lowercase ) ).convert("RGB" ).resize((256, 256) )
if str(_lowercase ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_lowercase )
else:
UpperCamelCase_ = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
UpperCamelCase_ = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = "cpu"
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**_lowercase )
UpperCamelCase_ = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = pipe(**self.get_dummy_inputs(_lowercase ) )
UpperCamelCase_ = output.images
UpperCamelCase_ = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
UpperCamelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase_ = "A red cartoon frog, 4k"
UpperCamelCase_ = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
UpperCamelCase_ = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCamelCase_ = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
UpperCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase_ , UpperCamelCase_ = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase_ = pipeline(
image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCamelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 60 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowercase__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
lowercase__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Any ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install \"sacrebleu>=1.4.12\"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Dict = False , lowercase_ : int = False , lowercase_ : Optional[int] = False , lowercase_ : Dict = False , ) -> Optional[int]:
UpperCAmelCase : str = len(references[0] )
if any(len(lowercase_ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
UpperCAmelCase : str = [[refs[i] for refs in references] for i in range(lowercase_ )]
UpperCAmelCase : Optional[int] = TER(
normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , )
UpperCAmelCase : Union[str, Any] = sb_ter.corpus_score(lowercase_ , lowercase_ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 151 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase__ = TypeVar("T")
UpperCAmelCase__ = Union[List[T], Tuple[T, ...]]
UpperCAmelCase__ = Union[T, List[T], Dict[str, T]]
UpperCAmelCase__ = Union[str, bytes, os.PathLike]
| 290 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
UpperCAmelCase__ = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """
    Diffie-Hellman key exchange over one of the RFC 3526 MODP groups above.
    Current minimum recommendation is 2048 bit (group 14).
    `hexlify`, `urandom` and `sha256` are expected to be imported from `binascii`,
    `os` and `hashlib` at the top of this module.
    """

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
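# Minimal usage sketch (not part of the original module): two parties exchange hex
# public keys and independently derive the same SHA-256-hashed shared secret.
def _demo_key_exchange() -> None:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)

    alice_public = alice.generate_public_key()
    bob_public = bob.generate_public_key()

    # Each side combines its own private key with the peer's public key.
    assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)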
| 290 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests were collected, pytest exits with code 5; report success instead.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
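# Illustrative sketch (not part of the conftest): with the flag registered above, a
# doctest can opt out of output comparison entirely. The helper name is hypothetical.
def _noisy_helper():
    """
    >>> _noisy_helper()  # doctest: +IGNORE_RESULT
    <object object at 0x0>
    """
    return object()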
| 203 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case : Dict = 4
snake_case : str = (1 << p) - 1
for _ in range(p - 2 ):
snake_case : Any = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
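# Small sketch (not in the original file): pick out the Mersenne-prime exponents
# among a few known odd primes; the expected result is [3, 5, 7, 13, 17, 19, 31].
def _mersenne_exponents(candidates=(3, 5, 7, 11, 13, 17, 19, 23, 29, 31)) -> list:
    return [p for p in candidates if lucas_lehmer_test(p)]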
| 203 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
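# Illustrative usage sketch (not part of the module); the checkpoint name is the
# one listed in PRETRAINED_VOCAB_FILES_MAP above and is downloaded on first use.
def _tokenizer_roundtrip_demo() -> str:
    tokenizer = BertGenerationTokenizer.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder"
    )
    ids = tokenizer("Hello world").input_ids
    return tokenizer.decode(ids)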
| 72 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
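# Illustrative sketch (not part of the test suite; assumes network access and that
# the named checkpoint is available on the Hub):
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")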
| 20 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
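# Illustrative sketch (not part of the tests): ImageGPT's image processor color-
# quantizes pixels by mapping each one to its nearest cluster, which is what turns
# an image into the 1024-token id sequence checked above.
def _nearest_cluster_ids(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) normalized RGB values; clusters: (k, 3) cluster centers
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(-1)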
| 119 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
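# Brief usage sketch (not part of the module): the auto classes above resolve a
# checkpoint's config type to the matching Flax model class at load time, e.g.
# model = FlaxAutoModel.from_pretrained("bert-base-cased")  # returns a FlaxBertModel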
| 371 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for an EnCodec audio codec model."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
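# Minimal sketch (not part of the module) showing how the derived properties follow
# from the constructor arguments, using the 24 kHz defaults with a 1-second chunk:
# config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
# config.chunk_length    # int(1.0 * 24000) = 24000 samples
# config.frame_rate      # ceil(24000 / (8 * 5 * 4 * 2)) = 75 frames per second
# config.num_quantizers  # int(1000 * 24.0 // (75 * 10)) = 32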
| 134 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the received inputs and labels."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."

        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size, batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size, batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
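# Illustrative invocation (all names, paths and hyperparameters are placeholders):
# python run_common_voice.py \
#     --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#     --dataset_config_name tr \
#     --output_dir ./wav2vec2-xlsr-demo \
#     --num_train_epochs 5 --per_device_train_batch_size 16 \
#     --fp16 --do_train --do_eval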
| 340 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 160 | 0 |
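# Illustrative invocation (the script file name and the paths are placeholders):
# python convert_opt_checkpoint.py \
#     --fairseq_path /path/to/restored.pt \
#     --pytorch_dump_folder_path ./opt-converted \
#     --hf_config facebook/opt-350m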
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a matrix of even dimensions into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication on square power-of-two matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
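# Sanity-check sketch (not in the original file): a naive triple-loop reference.
# Note strassen() pads its inputs in place, so compute the naive product first, e.g.
# expected = _naive_multiply(matrix1, matrix2); assert strassen(matrix1, matrix2) == expected
def _naive_multiply(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]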
| 98 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def UpperCamelCase ( a , a ) -> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def fraction_list(digit_len: int ) -> list[str]:
'''simple docstring'''
solutions = []
den = 11
last_digit = int('''1''' + '''0''' * digit_len )
for num in range(den , last_digit ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(num , den ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
den = 10
return solutions
return solutions
def solution(digit_len: int = 2 ) -> int:
'''simple docstring'''
result = 1.0
for fraction in fraction_list(digit_len ):
frac = Fraction(fraction )
result *= frac.denominator / frac.numerator
return int(result )
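# Hedged note: for digit_len=2 the four digit-cancelling fractions are 16/64,
# 19/95, 26/65 and 49/98; their product is 2/200 = 1/100, so solution() is
# expected to return 100 (Project Euler, problem 33).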
if __name__ == "__main__":
print(solution())
| 98 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
'''simple docstring'''
model_name_or_path: str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
config_name: Optional[str] = field(
default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
tokenizer_name: Optional[str] = field(
default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
cache_dir: Optional[str] = field(
default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
use_fast_tokenizer: bool = field(
default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
model_revision: str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
use_auth_token: bool = field(
default=False , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
validation_file: Optional[str] = field(
default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
overwrite_cache: bool = field(
default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
preprocessing_num_workers: Optional[int] = field(
default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
max_seq_length: Optional[int] = field(
default=None , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
pad_to_max_length: bool = field(
default=False , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
max_train_samples: Optional[int] = field(
default=None , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
max_eval_samples: Optional[int] = field(
default=None , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __post_init__( self ) -> None:
if self.train_file is not None:
extension = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
'''simple docstring'''
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__( self: Dict , features: Union[str, Any] ) -> Dict:
label_name = """label""" if """label""" in features[0].keys() else """labels"""
labels = [feature.pop(label_name ) for feature in features]
batch_size = len(features )
num_choices = len(features[0]["""input_ids"""] )
flattened_features = [
[{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
]
flattened_features = list(chain(*flattened_features ) )
batch = self.tokenizer.pad(
flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
# Add back labels
batch["""labels"""] = torch.tensor(labels , dtype=torch.int64 )
return batch
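# Minimal, self-contained sketch of the flatten -> pad -> un-flatten trick used
# by the collator above (plain torch only; the sizes are illustrative, not taken
# from the script).
import torch

batch_size, num_choices, seq_len = 2, 4, 5
flat = torch.zeros(batch_size * num_choices, seq_len)  # shape tokenizer.pad works on
unflat = flat.view(batch_size, num_choices, -1)  # shape the multiple-choice model consumes
assert unflat.shape == (batch_size, num_choices, seq_len)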
def main():
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""", model_args, data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level )
datasets.utils.logging.set_verbosity(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["""train"""] = data_args.train_file
if data_args.validation_file is not None:
data_files["""validation"""] = data_args.validation_file
extension = data_args.train_file.split(""".""" )[-1]
raw_datasets = load_dataset(
extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"""swag""", """regular""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"""ending{i}""" for i in range(4 )]
context_name = """sent1"""
question_header_name = """sent2"""
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(examples ):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
]
# Flatten out
first_sentences = list(chain(*first_sentences ) )
second_sentences = list(chain(*second_sentences ) )
# Tokenize
tokenized_examples = tokenizer(
first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
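# Sketch of the re-grouping on the line above (illustrative values): a flat
# list of 8 tokenized rows, sliced with step 4, becomes 2 examples x 4 choices:
# [0, 1, 2, 3, 4, 5, 6, 7] -> [[0, 1, 2, 3], [4, 5, 6, 7]].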
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
train_dataset = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset ), data_args.max_train_samples )
train_dataset = train_dataset.select(range(max_train_samples ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
train_dataset = train_dataset.map(
preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
eval_dataset = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset ), data_args.max_eval_samples )
eval_dataset = eval_dataset.select(range(max_eval_samples ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
eval_dataset = eval_dataset.map(
preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(eval_predictions ):
predictions , label_ids = eval_predictions
preds = np.argmax(predictions , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
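# Tiny worked example (illustrative): predictions [[0.2, 0.8], [0.9, 0.1]]
# argmax to [1, 0]; compared against label_ids [1, 1] that is one hit out of
# two, so the returned accuracy is 0.5.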
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics["""train_samples"""] = min(max_train_samples , len(train_dataset ) )
trainer.log_metrics("""train""" , metrics )
trainer.save_metrics("""train""" , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
metrics["""eval_samples"""] = min(max_eval_samples , len(eval_dataset ) )
trainer.log_metrics("""eval""" , metrics )
trainer.save_metrics("""eval""" , metrics )
kwargs = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
def _mp_fn(index ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 66 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def setUp( self ) -> None:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=True , )
assert hasattr(self , """env""" )
def create_estimator( self , instance_count ):
# configuration for running training on smdistributed Model Parallel
mpi_options = {
"""enabled""": True,
"""processes_per_host""": 8,
}
smp_options = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
distribution = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
name_extension = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
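# Hedged note on the sizing above: with processes_per_host=8 and partitions=4,
# each model replica spans 4 processes, so ddp=True yields 2-way data
# parallelism per host (8 / 4); microbatches=4 further splits each batch to
# keep the 4 pipeline partitions busy.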
def save_results_as_csv( self , job_name ) -> None:
TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def test_script( self , instance_count ) -> None:
# create estimator
estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile )
| 66 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components( ) ->Optional[int]:
"""simple docstring"""
model = torch.nn.Linear(2 , 4 )
optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def get_signature( model ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights( model ) ->Tuple:
"""simple docstring"""
state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(state )
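# Why `get_signature` works as a cheap fingerprint (illustrative): two
# Linear(2, 4) modules with different random weights almost surely differ in
# the sum of absolute parameter values, so save/load round-trips can be
# verified with a single scalar comparison instead of diffing full state_dicts.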
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
(
prepared_model,
prepared_optimizer,
prepared_scheduler,
prepared_train_dl,
prepared_valid_dl,
) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_8bit=True , device_map={'''''': 0} , )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
accelerator = Accelerator()
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
device_map = infer_auto_device_map(model )
device_map['''lm_head'''] = '''cpu'''
model = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True )
# This should not work and get value error
with self.assertRaises(ValueError ):
model = accelerator.prepare(model )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
PartialState._shared_state = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
device_map = infer_auto_device_map(model )
device_map['''lm_head'''] = 1
model = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_8bit=True , device_map=device_map , )
accelerator = Accelerator()
# This should not work and get value error
with self.assertRaises(ValueError ):
model = accelerator.prepare(model )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
device_map = infer_auto_device_map(model )
device_map['''lm_head'''] = 1
model = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_8bit=True , device_map=device_map , )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
model = torch.nn.Linear(10 , 10 )
optimizer = torch.optim.SGD(model.parameters() , lr=0.01 )
accelerator = Accelerator(cpu=True )
_ = accelerator.prepare(optimizer )
| 24 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
'''simple docstring'''
def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=10_00 , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def get_config(self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def create_and_check_model(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
'''simple docstring'''
model = LiltModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
result = model(input_ids , bbox=bbox )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_for_token_classification(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
'''simple docstring'''
config.num_labels = self.num_labels
model = LiltForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_question_answering(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
'''simple docstring'''
model = LiltForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common(self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
def is_pipeline_test_to_skip(
self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
return True
def setUp(self ):
'''simple docstring'''
self.model_tester = LiltModelTester(self )
self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def test_config(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_model(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_various_embeddings(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_token_classification(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def test_for_question_answering(self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def test_model_from_pretrained(self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LiltModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def test_inference_no_head(self ):
'''simple docstring'''
model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
input_ids = torch.tensor([[1, 2]] , device=torch_device )
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
# forward pass
with torch.no_grad():
outputs = model(input_ids=input_ids , bbox=bbox )
expected_shape = torch.Size([1, 2, 7_68] )
expected_slice = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 24 | 1 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental( fn : Callable ) -> Callable:
"""simple docstring"""
@wraps(fn )
def _inner_fn(*args : List[str] , **kwargs : Dict ):
warnings.warn(
(f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , UserWarning , )
return fn(*args , **kwargs )
return _inner_fn
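# Minimal usage sketch (the decorator name `experimental` is restored from
# context above; `demo` is illustrative and not part of the original snippet):
@experimental
def demo() -> None:
    ...

demo()  # warns: "'demo' is experimental and might be subject to breaking changes in the future."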
| 224 |
"""simple docstring"""
def longest_distance( graph : Optional[Any] ) -> None:
"""simple docstring"""
indegree = [0] * len(graph )
queue = []
long_dist = [1] * len(graph )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(graph ) ):
if indegree[i] == 0:
queue.append(i )
while queue:
vertex = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
long_dist[x] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(x )
print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
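# Hedged check: for the adjacency list above, one longest path is
# 0 -> 2 -> 5 -> 6 -> 7, which touches 5 vertices, so the call prints 5
# (distances here count vertices, since long_dist starts at 1).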
| 224 | 1 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
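# Illustrative match for the pattern above:
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]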
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 302 |
"""simple docstring"""
import math
def decimal_to_octal( num : int ) -> str:
octal = 0
counter = 0
while num > 0:
remainder = num % 8
octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
num = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"""0o{int(octal )}"""
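# Worked example (illustrative): 65 yields remainders 1, 0, 1 (65 = 8*8 + 1),
# accumulated as decimal digits 1*10**0 + 0*10**1 + 1*10**2 = 101, hence "0o101".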
def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 302 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class snake_case_ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_from_save_pretrained( self ) -> None:
pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
generator = torch.manual_seed(0 )
image = pipe.dual_guided(
prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname )
pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
generator = generator.manual_seed(0 )
new_image = pipe.dual_guided(
prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def test_inference_dual_guided_then_text_to_image( self ) -> None:
pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
prompt = "cyberpunk 2077"
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
generator = torch.manual_seed(0 )
image = pipe.dual_guided(
prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
expected_slice = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
prompt = "A painting of a squirrel eating a burger "
generator = torch.manual_seed(0 )
image = pipe.text_to_image(
prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
image = pipe.image_variation(init_image , generator=generator , output_type="numpy" ).images
image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
expected_slice = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 87 | '''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def _create_example_records( self : int ) -> list:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _create_example_dataset( self : List[Any] ) -> Dataset:
data = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(data )
def test_dataset_from_list( self : List[Any] ) -> None:
example_records = self._create_example_records()
dset = Dataset.from_list(example_records )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(dset ):
self.assertDictEqual(r , example_records[i] )
def test_dataset_from_list_matches_from_dict( self : Tuple ) -> None:
example_records = self._create_example_records()
dset = Dataset.from_list(example_records )
dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def test_missing_columns_are_backfilled( self : List[str] ) -> None: # checks what happens with missing columns
records = [{"""col_1""": 1}, {"""col_2""": """x"""}]
dset = Dataset.from_list(records )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def test_type_inferred_from_second_record( self : Dict ) -> None: # checks if the type can be inferred from the second record
records = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
dset = Dataset.from_list(records )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def test_from_empty_list( self : Optional[Any] ) -> None:
dset = Dataset.from_list([] )
self.assertEqual(len(dset ) , 0 )
self.assertListEqual(dset.column_names , [] )
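# Minimal sketch of the behavior exercised above (illustrative):
# Dataset.from_list([{"col_1": 1}, {"col_2": "x"}]) takes its schema from the
# first record, so row 0 is {"col_1": 1} and row 1 back-fills {"col_1": None}.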
| 229 | 0 |
'''simple docstring'''
def get_demo_graph( index: int ) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph: dict[int, list[int]] ) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_ )
                low[at] = min(low[at], low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
                else:
                    # This edge is a back edge and cannot be a bridge
                    low[at] = min(low[at], low[to] )
    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i, -1, bridges, id_ )
return bridges
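# Hedged usage sketch: on the first demo graph above, the cut edges are the
# ones joining the two cycles and the pendant vertex, so
# compute_bridges(get_demo_graph(0)) should yield [(3, 4), (2, 3), (2, 5)]
# (the order depends on the DFS traversal).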
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""simple docstring"""
UpperCamelCase_ : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ : bool = field(
default=UpperCamelCase , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __post_init__( self : Any ) -> None:
"""simple docstring"""
if self.train_file is not None:
extension = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
'''simple docstring'''
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__( self : List[Any] , features : List[str] ) -> List[str]:
"""simple docstring"""
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature.pop(label_name ) for feature in features]
batch_size = len(features )
num_choices = len(features[0]["input_ids"] )
flattened_features = [
[{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
]
flattened_features = list(chain(*flattened_features ) )
batch = self.tokenizer.pad(
flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
# Add back labels
batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
return batch
def __UpperCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", a_, a_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split("." )[-1]
raw_datasets = load_dataset(
extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"""ending{i}""" for i in range(4 )]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
max_seq_length = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
]
# Flatten out
first_sentences = list(chain(*first_sentences ) )
second_sentences = list(chain(*second_sentences ) )
# Tokenize
tokenized_examples = tokenizer(
first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v ), 4 )] for k, v in tokenized_examples.items()}
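# Illustrative sketch of what `preprocess_function` produces (toy values, not
# from the SWAG data). For one example with sent1/sent2 and four endings, four
# "context / header ending_i" pairs are tokenized, then regrouped so each key
# maps to a list of 4 encodings per example:
#
#   out = preprocess_function({"sent1": ["A man is sitting"], "sent2": ["He"],
#                              "ending0": ["smiles."], "ending1": ["runs."],
#                              "ending2": ["sleeps."], "ending3": ["sings."]})
#   len(out["input_ids"])     # 1 example
#   len(out["input_ids"][0])  # 4 candidate sequences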
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset ), data_args.max_train_samples )
train_dataset = train_dataset.select(range(max_train_samples ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
train_dataset = train_dataset.map(
preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset ), data_args.max_eval_samples )
eval_dataset = eval_dataset.select(range(max_eval_samples ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
eval_dataset = eval_dataset.map(
preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1 )
return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()  # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset ) )
trainer.log_metrics("train", metrics )
trainer.save_metrics("train", metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset ) )
trainer.log_metrics("eval", metrics )
trainer.save_metrics("eval", metrics )
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 17 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor ):
'''simple docstring'''
def __init__(self , *args , **kwargs ):
'''simple docstring'''
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*args , **kwargs )
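# Migration sketch (illustrative; the checkpoint name is an example, not part of
# this file): old code that instantiated the deprecated class keeps working but
# emits a FutureWarning; new code should use the image processor directly.
#
#   from transformers import DPTImageProcessor
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")   # preferred
#   # DPTFeatureExtractor.from_pretrained("Intel/dpt-large")           # deprecated alias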
| 102 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
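# How the lazy module behaves (illustrative): swapping the module in sys.modules
# for a _LazyModule defers the heavy torch-backed imports until first attribute
# access, e.g.:
#
#   import transformers.models.git as git  # cheap: nothing heavy imported yet
#   git.GitForCausalLM                     # first access triggers the real import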
| 167 | 0 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
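# Usage sketch (illustrative): these re-exports let downstream code import the
# collators and processors from the package root, e.g.
#
#   from transformers.data import DataCollatorWithPadding
#   collator = DataCollatorWithPadding(tokenizer=tokenizer)  # pads a list of encoded examples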
| 350 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
test_xformers_attention = False
def get_dummy_components(self):
embedder_hidden_size = 3_2
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
prior = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=embedder_projection_dim , num_layers=1 , )
torch.manual_seed(0 )
prior_scheduler = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
unet = UNet2DConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
vae = AutoencoderKL()
components = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
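# The tester mixins assemble the pipeline from these parts, roughly:
#
#   components = self.get_dummy_components()
#   pipe = StableUnCLIPPipeline(**components)  # dict keys must match the pipeline's __init__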
def get_dummy_inputs(self, device, seed=0 ):
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
def test_inference_batch_single_identical(self):
test_max_difference = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase ):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip(self):
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device='cpu' ).manual_seed(0 )
output = pipe('anime turtle' , generator=generator , output_type='np' )
image = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(image , expected_image )
def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
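# Why both memory savers (illustrative note): enable_attention_slicing() chunks
# the attention computation, while enable_sequential_cpu_offload() keeps
# submodules on CPU and moves each one to the GPU only while it runs. Together
# they trade speed for a much smaller peak allocation, which is what the
# assertion above checks:
#
#   pipe.enable_attention_slicing()
#   pipe.enable_sequential_cpu_offload()
#   _ = pipe("anime turtle", num_inference_steps=2, output_type="np")
#   torch.cuda.max_memory_allocated()  # expected to stay under ~7 GB here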
| 55 | 0 |