| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from ..utils import DummyObject, requires_backends


# Dummy placeholders that raise a helpful error when the optional "speech" backend
# is not installed. The original class names were lost to obfuscation; the two names
# below follow transformers' dummy speech objects and are a best-effort reconstruction.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
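A minimal usage sketch for the trainer above. Every name here (model, datasets, post-processing function) is a hypothetical stand-in for what a question-answering training script would normally provide:

# All names below are hypothetical stand-ins: a tokenized SQuAD-style train/eval
# dataset, the raw eval examples, and a function that turns start/end logits
# into answer strings for the metric.
trainer = QuestionAnsweringTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    eval_examples=eval_examples,
    post_process_function=post_processing_function,
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()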
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1

        # Binary search for the insertion point within the sorted prefix.
        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the larger elements one slot right, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
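For reference, a quick non-interactive sanity check of the sort above:

assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([1]) == [1]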
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yields 2, 3, 5, 7, ..."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: advance its smallest factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: mark its square as the first composite to skip.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
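The loop above relies on the binomial identity behind Project Euler problem 123: for the n-th prime p and odd n, (p - 1)^n + (p + 1)^n is congruent to 2·n·p modulo p², so it suffices to compare 2 * prime * n against the limit. The sieve itself can be exercised on its own:

gen = sieve()
print([next(gen) for _ in range(10)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]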
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name: str) -> ASTConfig:
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        # attribute names reconstructed from ASTConfig; the obfuscation erased them
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name: str) -> str:
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict: dict, config: ASTConfig) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # Target key names reconstructed to match the ViT-style attention naming
            # used by AST; the obfuscation removed the assignment targets.
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict: dict) -> None:
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1",
        "ast-finetuned-audioset-10-10-0.450": "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1",
        "ast-finetuned-audioset-10-10-0.448": "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1",
        "ast-finetuned-audioset-10-10-0.448-v2": "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1",
        "ast-finetuned-audioset-12-12-0.447": "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1",
        "ast-finetuned-audioset-14-14-0.443": "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1",
        "ast-finetuned-audioset-16-16-0.442": "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1",
        "ast-finetuned-speech-commands-v2": "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1",
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
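A programmatic equivalent of the CLI call above, for a quick local conversion (the output path is illustrative):

convert_audio_spectrogram_transformer_checkpoint(
    model_name="ast-finetuned-audioset-10-10-0.4593",
    pytorch_dump_folder_path="./ast-converted",  # illustrative output directory
    push_to_hub=False,
)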
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
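A quick sketch of how the validated `rope_scaling` field is used in practice (values illustrative):

# Linear RoPE scaling divides positions by `factor`, stretching the usable
# context window; `factor` must be a float strictly greater than 1.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

# This would raise, since the factor must be a float > 1:
# LlamaConfig(rope_scaling={"type": "linear", "factor": 1})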
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol^-1 K^-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
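A worked example of the ideal gas law PV = nRT behind both functions: 1 mol at 273.15 K in 0.0224 m³ gives P = 1 · 8.314462 · 273.15 / 0.0224 ≈ 101,400 Pa, i.e. roughly atmospheric pressure.

# 1 mol of an ideal gas at 273.15 K occupying 22.4 L (0.0224 m^3):
print(pressure_of_gas_system(1.0, 273.15, 0.0224))  # ~101388 Pa, about 1 atm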
import sys

N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in the 1000-digit number N."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
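For reference, the widely published answer to Project Euler problem 8 (the largest product of 13 adjacent digits in this number) makes a handy sanity check:

assert solution() == 23514624000  # known Project Euler 8 result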
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """Creates train/eval `DataLoader`s for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: a real AdamW unless the DeepSpeed config already defines one
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
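A minimal direct call for a single-process smoke run, bypassing argparse (values illustrative; under DeepSpeed the script would normally be started with `accelerate launch` instead):

args = argparse.Namespace(
    model_name_or_path="bert-base-cased",
    output_dir=".",
    performance_lower_bound=None,
    num_epochs=1,
)
training_function({"lr": 2e-5, "num_epochs": 1, "seed": 42, "batch_size": 16}, args)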
from typing import List, Optional, Union
import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filterbank (MFSC) features for one waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,  # the mask is needed for normalization below
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
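An illustrative call sketch for the extractor above, assuming the surrounding package layout is importable: one second of 16 kHz audio is turned into an (num_frames, 80) log-mel feature matrix.

# One second of silence at 16 kHz -> normalized log-mel features.
fe = MCTCTFeatureExtractor()
inputs = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, padding=True, return_tensors="np")
print(inputs["input_features"][0].shape)  # (num_frames, 80)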
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_A = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Union[str, Any] = "token-classification"
def __init__( self : Dict , UpperCamelCase : Any ) -> Optional[int]:
"""simple docstring"""
if type(UpperCamelCase ) == dict:
lowerCAmelCase__ : Optional[int] = Namespace(**UpperCamelCase )
lowerCAmelCase__ : Tuple = import_module("""tasks""" )
try:
lowerCAmelCase__ : Union[str, Any] = getattr(UpperCamelCase , hparams.task_type )
lowerCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
lowerCAmelCase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowerCAmelCase__ : Dict = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase , len(self.labels ) , self.mode )
def _lowerCAmelCase ( self : int , **UpperCamelCase : List[Any] ) -> str:
"""simple docstring"""
return self.model(**UpperCamelCase )
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowerCAmelCase__ : List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCAmelCase__ : Tuple = self(**UpperCamelCase )
lowerCAmelCase__ : List[Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.hparams
for mode in ["train", "dev", "test"]:
lowerCAmelCase__ : Union[str, Any] = self._feature_file(UpperCamelCase )
if os.path.exists(UpperCamelCase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , UpperCamelCase )
lowerCAmelCase__ : Tuple = torch.load(UpperCamelCase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
lowerCAmelCase__ : Union[str, Any] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase )
lowerCAmelCase__ : Tuple = self.token_classification_task.convert_examples_to_features(
UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , UpperCamelCase )
torch.save(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : bool = False ) -> DataLoader:
"""simple docstring"""
lowerCAmelCase__ : int = self._feature_file(UpperCamelCase )
logger.info("""Loading features from cached file %s""" , UpperCamelCase )
lowerCAmelCase__ : int = torch.load(UpperCamelCase )
lowerCAmelCase__ : str = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCAmelCase__ : Any = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCAmelCase__ : Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCAmelCase__ : Union[str, Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , batch_size=UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
"""Compute validation""" ""
lowerCAmelCase__ : str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowerCAmelCase__ : List[Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCAmelCase__ : Union[str, Any] = self(**UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = outputs[:2]
lowerCAmelCase__ : Optional[Any] = logits.detach().cpu().numpy()
lowerCAmelCase__ : Optional[Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : str = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
lowerCAmelCase__ : Any = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
lowerCAmelCase__ : List[str] = np.argmax(UpperCamelCase , axis=2 )
lowerCAmelCase__ : str = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
lowerCAmelCase__ : Any = dict(enumerate(self.labels ) )
lowerCAmelCase__ : str = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowerCAmelCase__ : Optional[int] = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(UpperCamelCase , UpperCamelCase ),
"""precision""": precision_score(UpperCamelCase , UpperCamelCase ),
"""recall""": recall_score(UpperCamelCase , UpperCamelCase ),
"""f1""": fa_score(UpperCamelCase , UpperCamelCase ),
}
lowerCAmelCase__ : Dict = dict(results.items() )
lowerCAmelCase__ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
# when stable
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = self._eval_end(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCAmelCase ( self : Dict , UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
# updating to test_epoch_end instead of deprecated test_end
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._eval_end(UpperCamelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCAmelCase__ : int = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCAmelCase ( UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
# Add NER specific options
BaseTransformer.add_model_specific_args(UpperCamelCase , UpperCamelCase )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=UpperCamelCase , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=1_28 , type=UpperCamelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=UpperCamelCase , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=UpperCamelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
_A = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_A = NERTransformer.add_model_specific_args(parser, os.getcwd())
_A = parser.parse_args()
_A = NERTransformer(args)
_A = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_A = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
_A = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
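To see how the seqeval metrics used in `_eval_end` score span-level predictions, here is a small self-contained example (toy labels, not from the script):

from seqeval.metrics import f1_score, precision_score, recall_score

y_true = [["B-PER", "I-PER", "O"], ["B-LOC"]]
y_pred = [["B-PER", "I-PER", "O"], ["O"]]
# The PER entity is found, the LOC entity is missed:
print(precision_score(y_true, y_pred), recall_score(y_true, y_pred), f1_score(y_true, y_pred))
# 1.0 0.5 0.666...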
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Dict ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCAmelCase__ : Union[str, Any] = Vector()
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : str = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCamelCase ) , 4 )
def _lowerCAmelCase ( self : List[str] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : str = Vector([1, 2] )
lowerCAmelCase__ : Optional[int] = Vector([1, 2, 3, 4, 5] )
lowerCAmelCase__ : Union[str, Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCAmelCase__ : List[Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
lowerCAmelCase__ : int = Vector([1, 2, 3] )
lowerCAmelCase__ : Optional[Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Vector([1, 2, 3] )
lowerCAmelCase__ : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Any = Vector([1, 2, 3] )
lowerCAmelCase__ : Any = Vector([2, -1, 4] ) # for test of dot product
lowerCAmelCase__ : Any = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def _lowerCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 2, 3] )
lowerCAmelCase__ : Optional[Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCamelCase , UpperCamelCase ) ) , """(3,4,7)""" )
def _lowerCAmelCase ( self : Optional[int] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 0, 0, 0, 0, 0] )
lowerCAmelCase__ : Any = x.copy()
self.assertEqual(str(UpperCamelCase ) , str(UpperCamelCase ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase ) , """(0,1,0)""" )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : Dict = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCamelCase , UpperCamelCase ) )
def _lowerCAmelCase ( self : List[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : int = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase , UpperCamelCase ) )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCAmelCase__ : Tuple = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def _lowerCAmelCase ( self : str ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) )
def _lowerCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def _lowerCAmelCase ( self : List[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
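An illustrative round-trip with the tokenizer above, assuming a local SentencePiece model file (the path is hypothetical):

tok = XGLMTokenizer("sentencepiece.bpe.model")  # hypothetical local vocab file
ids = tok("Hello world")["input_ids"]
print(tok.convert_ids_to_tokens(ids))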
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 10 | 0 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case = """sshleifer/mar_enro_6_3_student"""
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def _A ( self : Dict ):
super().setUp()
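# fetch and unpack a small en-ro translation dataset (40k train / 500 val / 500 test) shared by all tests in this class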
SCREAMING_SNAKE_CASE : Any = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = f'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def _A ( self : int ):
MarianMTModel.from_pretrained(UpperCAmelCase_ )
@slow
@require_torch_gpu
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Optional[Any] = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
SCREAMING_SNAKE_CASE : Optional[Any] = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
SCREAMING_SNAKE_CASE : List[str] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
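# substitute the script's placeholder variables ($MAX_LEN, $BS, ...) with concrete test values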
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE : int = bash_script.replace(k , str(v ) )
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE : Union[str, Any] = f'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
SCREAMING_SNAKE_CASE : Dict = ["finetune.py"] + bash_script.split() + args
with patch.object(UpperCAmelCase_ , "argv" , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE : List[str] = pl.Trainer.add_argparse_args(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = SummarizationModule.add_model_specific_args(UpperCAmelCase_ , os.getcwd() )
SCREAMING_SNAKE_CASE : int = parser.parse_args()
SCREAMING_SNAKE_CASE : List[Any] = main(UpperCAmelCase_ )
# Check metrics
SCREAMING_SNAKE_CASE : List[str] = load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE : str = metrics["val"][0]
SCREAMING_SNAKE_CASE : Union[str, Any] = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , float )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE : Tuple = os.listdir(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [x for x in contents if x.endswith(".ckpt" )][0]
SCREAMING_SNAKE_CASE : int = os.path.join(args.output_dir , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.load(UpperCAmelCase_ , map_location="cpu" )
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE : Union[str, Any] = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Optional[int] = f'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
SCREAMING_SNAKE_CASE : str = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
SCREAMING_SNAKE_CASE : Any = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
SCREAMING_SNAKE_CASE : Any = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
SCREAMING_SNAKE_CASE : Dict = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE : Union[str, Any] = bash_script.replace(k , str(v ) )
SCREAMING_SNAKE_CASE : Tuple = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = bash_script.replace("--fp16" , "" )
SCREAMING_SNAKE_CASE : Optional[int] = 6
SCREAMING_SNAKE_CASE : str = (
["distillation.py"]
+ bash_script.split()
+ [
f'''--output_dir={output_dir}''',
"--gpus=1",
"--learning_rate=1e-3",
f'''--num_train_epochs={epochs}''',
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(UpperCAmelCase_ , "argv" , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE : Optional[Any] = pl.Trainer.add_argparse_args(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = SummarizationDistiller.add_model_specific_args(UpperCAmelCase_ , os.getcwd() )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE : List[Any] = distill_main(UpperCAmelCase_ )
# Check metrics
SCREAMING_SNAKE_CASE : Optional[Any] = load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE : str = metrics["val"][0]
SCREAMING_SNAKE_CASE : Dict = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE : int = os.listdir(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = [x for x in contents if x.endswith(".ckpt" )][0]
SCREAMING_SNAKE_CASE : Any = os.path.join(args.output_dir , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.load(UpperCAmelCase_ , map_location="cpu" )
SCREAMING_SNAKE_CASE : Tuple = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
SCREAMING_SNAKE_CASE : Any = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 360 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 319 | 0 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def a__ ( snake_case__ , snake_case__ ) -> Tuple:
try:
with open(snake_case__ , """rb""" ) as flax_state_f:
lowerCamelCase = from_bytes(snake_case__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(snake_case__ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def a__ ( snake_case__ , snake_case__ ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowerCamelCase = jax.tree_util.tree_map(
lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
lowerCamelCase = """"""
lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" )
lowerCamelCase = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCamelCase = []
lowerCamelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase = flax_key_tuple.split(""".""" )
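# Flax stores conv kernels as HWIO; 4-d "kernel" weights are transposed to PyTorch's OIHW layout below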
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(snake_case__ ):
lowerCamelCase = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
lowerCamelCase = """.""".join(snake_case__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
lowerCamelCase = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
lowerCamelCase = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
lowerCamelCase = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
return pt_model
| 291 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_attention_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_choices
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_attention_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = FlaxRoFormerModelTester(self )
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase = model(_a )[0]
lowerCamelCase = 50_000
lowerCamelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , _a )
lowerCamelCase = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 291 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__A ={
'''google/realm-cc-news-pretrained-embedder''': 5_1_2,
'''google/realm-cc-news-pretrained-encoder''': 5_1_2,
'''google/realm-cc-news-pretrained-scorer''': 5_1_2,
'''google/realm-cc-news-pretrained-openqa''': 5_1_2,
'''google/realm-orqa-nq-openqa''': 5_1_2,
'''google/realm-orqa-nq-reader''': 5_1_2,
'''google/realm-orqa-wq-openqa''': 5_1_2,
'''google/realm-orqa-wq-reader''': 5_1_2,
}
__A ={
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = RealmTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Optional[int]:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
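# rebuild the fast tokenizer's normalizer if the serialized options disagree with the arguments passed in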
if (
normalizer_state.get("lowercase" , lowercase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(lowercase , normalizer_state.pop("type" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**lowercase )
lowerCamelCase_ = do_lower_case
def SCREAMING_SNAKE_CASE_( self , lowercase , **lowercase ) -> Optional[Any]:
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop("text_pair" , lowercase )
lowerCamelCase_ = kwargs.pop("return_tensors" , lowercase )
lowerCamelCase_ = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
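# tokenize each candidate (optionally with its paired text) and accumulate the padded encodings per key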
for idx, candidate_text in enumerate(lowercase ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(lowercase , lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = encoded_candidates.get("input_ids" )
lowerCamelCase_ = encoded_candidates.get("attention_mask" )
lowerCamelCase_ = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowercase )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(item ) != 0}
return BatchEncoding(lowercase , tensor_type=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=None ) -> int:
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
lowerCamelCase_ = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 47 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A =True
except ImportError:
__A =False
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase_ ( lowerCamelCase__ ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
lowerCamelCase_ = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=lowercase , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=lowercase , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=lowercase )
def __init__( self , lowercase , lowercase , lowercase=None , *lowercase ) -> List[str]:
lowerCamelCase_ = testing
lowerCamelCase_ = testing_file
lowerCamelCase_ = path
def SCREAMING_SNAKE_CASE_( self ) -> str:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(lowercase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
lowerCamelCase_ = (
Path(lowercase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase_ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowercase ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
lowerCamelCase_ = json.load(lowercase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase , extra_context=lowercase , )
lowerCamelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
lowerCamelCase_ = json.load(lowercase )
lowerCamelCase_ = configuration["lowercase_modelname"]
lowerCamelCase_ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
lowerCamelCase_ = "PyTorch" in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ = "Flax" in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(lowercase , exist_ok=lowercase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=lowercase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , "w" ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(lowercase ):
with open(lowercase , "r" ) as f:
lowerCamelCase_ = f.readlines()
with open(lowercase , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase , lowercase , lowercase ):
# Create temp file
lowerCamelCase_ , lowerCamelCase_ = mkstemp()
lowerCamelCase_ = False
with fdopen(lowercase , "w" ) as new_file:
with open(lowercase ) as old_file:
for line in old_file:
new_file.write(lowercase )
if line_to_copy_below in line:
lowerCamelCase_ = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(lowercase , lowercase )
# Remove original file
remove(lowercase )
# Move new file
move(lowercase , lowercase )
def skip_units(lowercase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase ):
with open(lowercase ) as datafile:
lowerCamelCase_ = []
lowerCamelCase_ = False
lowerCamelCase_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase_ = line.split("\"" )[1]
lowerCamelCase_ = skip_units(lowercase )
elif "# Below: " in line and "##" not in line:
lowerCamelCase_ = line.split("\"" )[1]
lowerCamelCase_ = skip_units(lowercase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase , lowercase , lowercase )
lowerCamelCase_ = []
elif "# Replace with" in line and "##" not in line:
lowerCamelCase_ = []
elif "##" not in line:
lines_to_copy.append(lowercase )
remove(lowercase )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(lowercase )
| 47 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :List[str] = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__snake_case :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 49 |
'''simple docstring'''
import requests
__a: str = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def __UpperCamelCase ( UpperCAmelCase ):
# fetching a list of articles in json format
lowercase__ : Optional[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 198 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__lowerCamelCase = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
__lowerCamelCase = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
__lowerCamelCase = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowerCamelCase = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowerCamelCase = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ):
for tf_name, hf_name in patterns:
snake_case : Dict = k.replace(tf_name , hf_name )
return k
def UpperCamelCase ( __lowerCamelCase : dict , __lowerCamelCase : dict ):
snake_case : Optional[Any] = BigBirdPegasusConfig(**__lowerCamelCase )
snake_case : Optional[int] = BigBirdPegasusForConditionalGeneration(__lowerCamelCase )
snake_case : Optional[Any] = torch_model.state_dict()
snake_case : str = {}
# separating decoder weights
snake_case : Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
snake_case : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
snake_case : Dict = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(__lowerCamelCase ):
continue
snake_case : Union[str, Any] = DECODER_PATTERNS
snake_case : List[Any] = rename_state_dict_key(__lowerCamelCase , __lowerCamelCase )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
snake_case : Tuple = v.T
snake_case : Tuple = torch.from_numpy(__lowerCamelCase )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
snake_case : List[str] = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(__lowerCamelCase ):
continue
snake_case : Any = REMAINING_PATTERNS
snake_case : Tuple = rename_state_dict_key(__lowerCamelCase , __lowerCamelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
snake_case : Union[str, Any] = v.T
snake_case : str = torch.from_numpy(__lowerCamelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
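# the single TF position-embedding table is remapped so encoder and decoder both reuse the same weights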
snake_case : str = mapping["model.embed_positions.weight"]
snake_case : Optional[int] = mapping.pop("model.embed_positions.weight" )
snake_case , snake_case : Union[str, Any] = torch_model.load_state_dict(__lowerCamelCase , strict=False )
snake_case : Tuple = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCamelCase ( __lowerCamelCase : int ):
snake_case : Any = tf.train.list_variables(__lowerCamelCase )
snake_case : Optional[int] = {}
snake_case : int = ["global_step"]
for name, shape in tqdm(__lowerCamelCase , desc="converting tf checkpoint to dict" ):
snake_case : Dict = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : List[str] = tf.train.load_variable(__lowerCamelCase , name )
snake_case : Tuple = array
return tf_weights
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : dict ):
snake_case : Tuple = get_tf_weights_as_numpy(__lowerCamelCase )
snake_case : Union[str, Any] = convert_bigbird_pegasus(__lowerCamelCase , __lowerCamelCase )
torch_model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 10 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__lowerCamelCase = Mapping[str, np.ndarray]
__lowerCamelCase = Mapping[str, Any] # Is a nested dict.
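# ProteinNet stores coordinates in picometers; multiply by this constant to convert to angstroms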
__lowerCamelCase = 0.01
@dataclasses.dataclass(frozen=A_ )
class UpperCAmelCase :
A__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
A__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
A__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
A__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
A__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
A__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
A__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
A__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
A__ : Optional[Sequence[int]] = None
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Dict = r"(\[[A-Z]+\]\n)"
snake_case : List[str] = [tag.strip() for tag in re.split(__lowerCamelCase , __lowerCamelCase ) if len(tag ) > 0]
snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
snake_case : List[str] = ["N", "CA", "C"]
snake_case : str = None
snake_case : str = None
snake_case : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
snake_case : Tuple = g[1][0].strip()
for i in range(len(seq ) ):
if seq[i] not in residue_constants.restypes:
snake_case : Optional[Any] = "X" # FIXME: strings are immutable
snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(float , g[1][axis].split() ) ) )
snake_case : Union[str, Any] = np.array(__lowerCamelCase )
snake_case : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
snake_case : List[str] = np.zeros(
(
len(seq ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCamelCase ):
snake_case : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCamelCase , atom_mask=__lowerCamelCase , aatype=__lowerCamelCase , residue_index=np.arange(len(__lowerCamelCase ) ) , b_factors=__lowerCamelCase , )
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : int = 0 ):
snake_case : List[str] = []
snake_case : str = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
snake_case : Union[str, Any] = prot.parents
snake_case : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
snake_case : Tuple = [p for i, p in zip(__lowerCamelCase , __lowerCamelCase ) if i == chain_id]
if parents is None or len(__lowerCamelCase ) == 0:
snake_case : int = ["N/A"]
pdb_headers.append(f"""PARENT {' '.join(__lowerCamelCase )}""" )
return pdb_headers
def UpperCamelCase ( __lowerCamelCase : Protein , __lowerCamelCase : str ):
snake_case : List[str] = []
snake_case : Any = pdb_str.split("\n" )
snake_case : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
snake_case : Optional[Any] = []
if prot.parents_chain_index is not None:
snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(i ) , [] )
parent_dict[str(i )].append(p )
snake_case : List[str] = max([int(chain_idx ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
snake_case : Optional[Any] = parent_dict.get(str(i ) , ["N/A"] )
parents_per_chain.append(__lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
snake_case : Optional[Any] = [["N/A"]]
def make_parent_line(__lowerCamelCase : Sequence[str] ) -> str:
return f"""PARENT {' '.join(__lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
snake_case : List[Any] = 0
for i, l in enumerate(__lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(l )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCamelCase ):
snake_case : int = parents_per_chain[chain_counter]
else:
snake_case : Any = ["N/A"]
out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
snake_case : str = residue_constants.restypes + ["X"]
def res_atoa(__lowerCamelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
snake_case : List[Any] = residue_constants.atom_types
snake_case : List[str] = []
snake_case : Any = prot.atom_mask
snake_case : Any = prot.aatype
snake_case : Dict = prot.atom_positions
snake_case : List[str] = prot.residue_index.astype(np.intaa )
snake_case : Dict = prot.b_factors
snake_case : Tuple = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
snake_case : Any = get_pdb_headers(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
pdb_lines.extend(__lowerCamelCase )
snake_case : Dict = aatype.shape[0]
snake_case : Tuple = 1
snake_case : Any = 0
snake_case : Union[str, Any] = string.ascii_uppercase
snake_case : int = None
# Add all atom sites.
for i in range(__lowerCamelCase ):
snake_case : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
snake_case : Any = "ATOM"
snake_case : str = atom_name if len(atom_name ) == 4 else f""" {atom_name}"""
snake_case : Optional[Any] = ""
snake_case : Dict = ""
snake_case : Optional[Any] = 1.00
snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
snake_case : Dict = ""
snake_case : Any = "A"
if chain_index is not None:
snake_case : str = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
snake_case : List[str] = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
snake_case : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
snake_case : Any = True
snake_case : Tuple = chain_index[i + 1]
if should_terminate:
# Close the chain.
snake_case : Optional[Any] = "TER"
snake_case : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCamelCase , __lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def UpperCamelCase ( __lowerCamelCase : FeatureDict , __lowerCamelCase : ModelOutput , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[np.ndarray] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Sequence[str]] = None , __lowerCamelCase : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowerCamelCase , remark=__lowerCamelCase , parents=__lowerCamelCase , parents_chain_index=__lowerCamelCase , )
| 10 | 1 |
'''simple docstring'''
def a__ ( a__ = 1_00_00_00 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = {1: 1}
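# memoize Collatz chain lengths: the dict maps a starting number to the number of steps it takes to reach 1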
for inputa in range(2 , a__ ):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__SCREAMING_SNAKE_CASE = (3 * number) + 1
counter += 1
if inputa not in counters:
__SCREAMING_SNAKE_CASE = counter
if counter > pre_counter:
__SCREAMING_SNAKE_CASE = inputa
__SCREAMING_SNAKE_CASE = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 267 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__ ( a , a ):
"""simple docstring"""
lowerCAmelCase__ = "maskformer-swin"
lowerCAmelCase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple=224 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=96 , __SCREAMING_SNAKE_CASE : Optional[Any]=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE : Any=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Dict=4.0 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1E-5 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = window_size
__SCREAMING_SNAKE_CASE = mlp_ratio
__SCREAMING_SNAKE_CASE = qkv_bias
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = use_absolute_embeddings
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__SCREAMING_SNAKE_CASE = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 267 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[str] = 3_8_4
_lowerCAmelCase : Union[str, Any] = 7
if "tiny" in model_name:
_lowerCAmelCase : Optional[int] = 9_6
_lowerCAmelCase : Optional[int] = (2, 2, 6, 2)
_lowerCAmelCase : Union[str, Any] = (3, 6, 1_2, 2_4)
elif "small" in model_name:
_lowerCAmelCase : Optional[int] = 9_6
_lowerCAmelCase : int = (2, 2, 1_8, 2)
_lowerCAmelCase : List[str] = (3, 6, 1_2, 2_4)
elif "base" in model_name:
_lowerCAmelCase : List[str] = 1_2_8
_lowerCAmelCase : Dict = (2, 2, 1_8, 2)
_lowerCAmelCase : Dict = (4, 8, 1_6, 3_2)
_lowerCAmelCase : List[str] = 1_2
_lowerCAmelCase : Dict = 5_1_2
elif "large" in model_name:
_lowerCAmelCase : Union[str, Any] = 1_9_2
_lowerCAmelCase : Any = (2, 2, 1_8, 2)
_lowerCAmelCase : Dict = (6, 1_2, 2_4, 4_8)
_lowerCAmelCase : str = 1_2
_lowerCAmelCase : str = 7_6_8
# set label information
_lowerCAmelCase : List[str] = 1_5_0
_lowerCAmelCase : Union[str, Any] = 'huggingface/label-files'
_lowerCAmelCase : List[Any] = 'ade20k-id2label.json'
_lowerCAmelCase : List[str] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase : Tuple = {int(k ): v for k, v in idalabel.items()}
_lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : str = SwinConfig(
embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
_lowerCAmelCase : List[Any] = UperNetConfig(
backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , )
return config
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key (dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v (state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict,
            # following the target naming scheme used in create_rename_keys above
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
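# Illustrative shapes (assuming dim == 96 for the first stage of the tiny model): the fused
# qkv.weight of shape (3 * 96, 96) is split into three (96, 96) matrices for query, key and
# value, and the (3 * 96,) qkv.bias into three (96,) vectors.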
def reverse_correct_unfold_reduction_order (x):
    """simple docstring"""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_reduction_order (x):
    """simple docstring"""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_norm_order (x):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def correct_unfold_norm_order (x):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
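# Note: the `reverse_*` helpers above are applied to the checkpoint's patch-merging
# (downsample) weights; reshaping into four channel groups and indexing with [0, 2, 1, 3]
# swaps the two middle groups, undoing the nn.Unfold-based channel ordering so the
# weights line up with the HF Swin layout.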
def convert_upernet_checkpoint (model_name, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    model_name_to_url = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', file_name=model_name)[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print('First values of logits:', logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub')
        model.push_to_hub(f'openmmlab/{model_name}')
        processor.push_to_hub(f'openmmlab/{model_name}')
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
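    # Example invocation (script name and paths are hypothetical):
    #   python convert_upernet_swin_to_pytorch.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny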
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
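# Note: with this _LazyModule pattern, importing the package only registers the names listed
# in _import_structure; the heavy torch/tf/flax modules are imported lazily, on the first
# attribute access (e.g. the first time ResNetModel is actually looked up).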
| 25 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer( nn.Module ):
    def __init__( self , module , rank ):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self , input , *args , **kwargs ):
        return self.module(input , *args , **kwargs ) + self.adapter(input )
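# Illustrative: for an input x the layer computes module(x) + B(A(x)), where A and B are the
# two rank-constrained nn.Linear maps in `self.adapter`; because B is zero-initialised, the
# wrapped module's output is unchanged at step 0 and training only perturbs it gradually.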
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
    model_name = """bigscience/bloom-1b7"""
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574
    input_text = """Hello my name is"""
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 10
def _lowercase( self ) -> Optional[Any]:
# Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class UpperCamelCase_ ( _snake_case ):
def _lowercase( self ) -> Optional[Any]:
super().setUp()
# Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
def _lowercase( self ) -> int:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = self.model_abit.config
self.assertTrue(hasattr(__snake_case , """quantization_config""" ) )
UpperCAmelCase : List[str] = config.to_dict()
UpperCAmelCase : List[str] = config.to_diff_dict()
UpperCAmelCase : Dict = config.to_json_string()
def _lowercase( self ) -> List[str]:
from bitsandbytes.nn import Paramsabit
        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _lowercase( self ) -> List[str]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__snake_case , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _lowercase( self ) -> Any:
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
        output_sequences = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__snake_case ) , self.EXPECTED_OUTPUTS )
def _lowercase( self ) -> Tuple:
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=quantization_config , device_map="""auto""" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__snake_case ) , self.EXPECTED_OUTPUTS )
def _lowercase( self ) -> Any:
with self.assertRaises(__snake_case ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__snake_case )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Union[str, Any] = BitsAndBytesConfig()
with self.assertRaises(__snake_case ):
UpperCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__snake_case , load_in_abit=__snake_case , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def _lowercase( self ) -> Tuple:
with self.assertRaises(__snake_case ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(__snake_case ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__snake_case ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(__snake_case ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__snake_case ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
UpperCAmelCase : Optional[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" )
UpperCAmelCase : int = self.model_fpaa.to(torch.floataa )
UpperCAmelCase : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
UpperCAmelCase : str = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
UpperCAmelCase : List[str] = self.model_fpaa.half()
# Check this does not throw an error
UpperCAmelCase : Any = self.model_fpaa.float()
def _lowercase( self ) -> Optional[Any]:
        model = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=True , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def _lowercase( cls ) -> Any:
        cls.model_name = """t5-small"""
        cls.dense_act_model_name = """google/flan-t5-small"""  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = """Translate in German: Hello, my dog is cute"""
def _lowercase( self ) -> Dict:
gc.collect()
torch.cuda.empty_cache()
def _lowercase( self ) -> Optional[int]:
from transformers import TaForConditionalGeneration
        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
def _lowercase( self ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        _ = model.generate(**encoded_input )
class UpperCamelCase_ ( _snake_case ):
def _lowercase( self ) -> Tuple:
super().setUp()
# model_name
        self.model_name = """bigscience/bloom-560m"""
        self.seq_to_seq_name = """t5-small"""
# Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=True , device_map="""auto""" )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=True , device_map="""auto""" )
def _lowercase( self ) -> List[str]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _lowercase( self ) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class UpperCamelCase_ ( _snake_case ):
def _lowercase( self ) -> Dict:
super().setUp()
def _lowercase( self ) -> List[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _lowercase( self ) -> Tuple:
        self.pipe = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
        pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class UpperCamelCase_ ( _snake_case ):
def _lowercase( self ) -> Union[str, Any]:
super().setUp()
def _lowercase( self ) -> Optional[int]:
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__snake_case ) , self.EXPECTED_OUTPUTS )
class UpperCamelCase_ ( _snake_case ):
def _lowercase( self ) -> Union[str, Any]:
        self.model_name = """facebook/opt-350m"""
super().setUp()
def _lowercase( self ) -> List[str]:
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
out.logits.norm().backward()
for module in model.modules():
            if isinstance(module , LoRALayer ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class UpperCamelCase_ ( _snake_case ):
    model_name = """gpt2-xl"""
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
| 265 |
'''simple docstring'''
class RadixNode:
'''simple docstring'''
    def __init__( self , prefix = "" , is_leaf = False ):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word ):
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
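    # Illustrative: with self.prefix == "band" and word == "banana", match() returns
    # ("ban", "d", "ana") -- the common part, the prefix remainder and the word remainder.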
    def insert_many( self , words ):
        for word in words:
            self.insert(word )
    def insert( self , word ):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word ):
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word ):
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height = 0 ):
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie( ):
    """simple docstring"""
    words = """banana bananas bandana band apple all beast""".split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def pytest_tests( ):
"""simple docstring"""
assert test_trie()
def main( ):
    """simple docstring"""
    root = RadixNode()
    words = """banana bananas bandanas bandana band apple all beast""".split()
    root.insert_many(words )
    print("""Words:""" , words )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
| 200 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df, partition_order ):
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
    return expected_row_ids_and_row_dicts
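# Illustrative: with two 2-row partitions and partition_order == [1, 0], the returned ids
# come out as "1_0", "1_1", "0_0", "0_1", each paired with its row rendered as a dict.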
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0_0 ).repartition(1 )
    spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 5_0
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(3_0 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(2_0 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0, num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1, num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
"""simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0_0 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
| 330 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
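# Illustrative: vocab_tokens maps each BPE symbol above to its index, e.g.
# {"l": 0, "o": 1, ..., "<unk>": 20} for this 21-symbol toy vocabulary.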
merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
    merges_file = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 330 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowercase__ : str = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping( key , file ):
    # Handle first and last layers
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks (the layer number comes from the shard file name)
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , file )[1] )
    layer_number -= 3
    return f"h.{layer_number}." + key
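# Illustrative: a shard file whose name contains "layer_04" puts its tensors under the
# "h.1." prefix -- the -3 offset skips the embedding/norm layers that Megatron counts
# before the first transformer block. (The exact shard file naming is an assumption.)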
def get_dtype_size( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
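# Illustrative: get_dtype_size(torch.float16) -> 2 and get_dtype_size(torch.float32) -> 4
# bytes per element, while torch.bool is counted as 1/8 of a byte.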
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
        index_dict = {'''weight_map''': {}, '''metadata''': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names ):
            print('''Processing file: {}'''.format(file ) )
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files (a fresh name so `file` keeps its "model_00" marker)
                tp_file = file.replace('''model_00''' , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , tp_file ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = '''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        index_dict['''metadata''']['''total_size'''] = total_size
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
            json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + '''\n'''
            f.write(json_config )
    else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                tp_file = file.replace('''model_00''' , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , tp_file ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"Save configuration file to {pytorch_config_dump_path}" )
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowercase__ : List[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
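    # Example invocation (paths are hypothetical):
    #   python convert_bloom_original_checkpoint_to_pytorch.py \
    #       --bloom_checkpoint_path ./bloom-megatron --pytorch_dump_folder_path ./bloom-hf \
    #       --shard_model --pretraining_tp 4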
| 264 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
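# Illustrative: one resulting pair is ("blocks.0.attn.proj.weight",
# "vit.encoder.layer.0.attention.output.dense.weight") when base_model is False.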
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    backbone_config = BitConfig(
        global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1_000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Predicted class:''' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}" )
        model.push_to_hub(f"ybelkada/{vit_name}" )
        processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
lowercase__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 264 | 1 |
def naive_pattern_search( s , pattern ) -> list:
    """simple docstring"""
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
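# Illustrative: naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") returns [4, 10, 18],
# the start index of each occurrence; the scan costs O(len(s) * len(pattern)) comparisons.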
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 369 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Union[str, Any] = """https://openaipublic.azureedge.net/jukebox/models/"""
SCREAMING_SNAKE_CASE_:Optional[int] = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key( key ) -> str:
    """simple docstring"""
    if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
    elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
    elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
    elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
        key = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
    if "conditioner_blocks.0." in key:
        key = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
    if "prime_prior" in key:
        key = key.replace("""prime_prior""" , """encoder""" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(""".emb.""" , """.""" )
    if key.endswith("""k""" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(""".k""" , """.codebook""" )
    if "y_emb." in key:
        return key.replace("""y_emb.""" , """metadata_embedding.""" )
    if "x_emb.emb." in key:
        key = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ) -> dict:
    """simple docstring"""
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    re_encoder_block_resnet = re.compile(
        R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_encoder_block_proj_out = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    re_decoder_block_conv_out = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    re_decoder_block_resnet = re.compile(
        R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_decoder_block_proj_in = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    re_prior_cond_conv_out = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
    re_prior_cond_resnet = re.compile(
        R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_prior_cond_proj_in = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"""1""": 1, """3""": 2}[groups[-2]]
            prefix = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"""1""": 1, """3""": 2}[groups[-2]]
            prefix = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"""1""": 1, """3""": 2}[groups[-2]]
            prefix = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
            print(f'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
        elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
            val = model_state_dict[f'''{key_prefix}.{key}''']
            print(f'''{original_key} -> {key} :\nshape {val.shape} and {value.shape}, do not match''' )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
            r = requests.get(f'''{PREFIX}{file}''' , allow_redirects=True )
            os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=True )
            open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , """wb""" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("""/""" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["""model"""]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(""".b""" ):
                new_dic[k.replace("""b""" , """bias""" )] = old_dic[k]
            elif k.endswith(""".w""" ):
                new_dic[k.replace("""w""" , """weight""" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(""".blocks.""" , """.model.""" )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = """vqvae""" if i == 0 else f'''priors.{3 - i}'''
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f'''{pytorch_dump_folder_path}/mapping.json''' , """w""" ) as txtfile:
        json.dump(mapping , txtfile )
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 115 | 0 |
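
The Jukebox conversion row above drives all of its state-dict renaming through compiled regexes plus arithmetic on the captured groups. The same technique in miniature, on a made-up toy key schema (names here are illustrative, not the Jukebox layout):

import re

# Toy schema: "encoders.<level>.model.<m>.<n>.weight" becomes
# "encoders.<level>.block.<2*m + n>.weight".
RE_CONV = re.compile(r"encoders\.(\d+)\.model\.(\d+)\.(\d+)\.(bias|weight)")

def remap(key: str) -> str:
    match = RE_CONV.fullmatch(key)
    if match is None:
        return key  # unrecognized keys pass through unchanged
    level, m, n, kind = match.groups()
    return f"encoders.{level}.block.{int(m) * 2 + int(n)}.{kind}"

state_dict = {"encoders.0.model.1.0.weight": 1.0, "lm_head.weight": 2.0}
print({remap(k): v for k, v in state_dict.items()})
# {'encoders.0.block.2.weight': 1.0, 'lm_head.weight': 2.0}
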
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase : int = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 52 |
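
The Beit row above is the standard thin deprecation shim: a subclass whose only job is to warn and delegate. The pattern generically, with made-up class names:

import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0):
        self.scale = scale

class OldProcessor(NewProcessor):  # hypothetical deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)

OldProcessor()  # emits a FutureWarning, then behaves exactly like NewProcessor
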
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 34 | 0 |
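
A slightly hardened variant of the og:image downloader above: timeouts, HTTP status checks, a guard for pages without the meta tag, and a portable timestamp (the colons in the original file name are invalid on Windows). This sketch assumes only requests and bs4:

from datetime import datetime

import requests
from bs4 import BeautifulSoup

def download_og_image(url: str) -> str:
    page = requests.get(url, timeout=10)
    page.raise_for_status()
    soup = BeautifulSoup(page.content, "html.parser")
    meta = soup.find("meta", {"property": "og:image"})
    if meta is None:
        raise ValueError(f"no og:image meta tag found at {url}")
    image = requests.get(meta["content"], timeout=10)
    image.raise_for_status()
    file_name = f"{datetime.now():%Y-%m-%d_%H-%M-%S}.jpg"  # '-' keeps the name portable
    with open(file_name, "wb") as fp:
        fp.write(image.content)
    return file_name
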
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 231 |
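
The benchmark entry point above recovers the offending flags by calling eval on the tail of the argparse error message. A standalone sketch of the same translation that parses with ast.literal_eval instead, which accepts list literals but not arbitrary code (flag names here are illustrative):

import ast

DEPRECATED_STEMS = {"cuda", "tpu"}  # hypothetical set of deprecated flag stems

def translate_deprecated(error_text: str) -> list:
    """Turn '--no_x' flags recovered from an argparse error into '--no-x' hints."""
    offending = ast.literal_eval(error_text.split(" ")[-1])  # e.g. "['--no_cuda']"
    hints = []
    for arg in offending:
        stem = arg[len("--no_"):]
        if arg.startswith("--no_") and stem in DEPRECATED_STEMS:
            hints.append(f"use --no-{stem} instead of {arg}")
    return hints

print(translate_deprecated("unrecognized arguments: ['--no_cuda']"))
# ['use --no-cuda instead of --no_cuda']
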
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def test_input_types(self) ->Tuple:
        '''simple docstring'''
        constraint_token_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(constraint_token_ids)
        self.assertTrue(isinstance(dc.token_ids , list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self) ->str:
        '''simple docstring'''
        # one sequence is a strict prefix of another, which is not allowed
        constraint_token_ids = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(constraint_token_ids)  # fails here

    def test_example_progression(self) ->str:
        '''simple docstring'''
        constraint_token_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraint_token_ids)
        stepped , completed , reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self) ->Tuple:
        '''simple docstring'''
        constraint_token_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraint_token_ids)
        stepped , completed , reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped , completed , reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped , completed , reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 231 | 1 |
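
A compact usage sketch of the (stepped, completed, reset) protocol those tests exercise; the constraint is satisfied once any one of the nested sequences has been generated in full:

from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
    print(token, stepped, completed, reset, dc.current_seq)
# The last update reports completed=True with current_seq == [1, 2, 4];
# a token outside the trie would instead report reset=True and restart the constraint.
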
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func

    return decorator


def mark_multiple(*keys):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls , name , bases , attrs):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None


def register(cls):
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 153 |
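
A short usage sketch for the decorator/metaclass machinery above: mark methods with key codes, then let register collect them into key_handler. The Menu class and the key values are illustrative; real callers pass KEYMAP codes rather than raw characters:

@register
class Menu:
    @mark("j")
    def move_down(cls):
        return "down"

    @mark_multiple("k", "K")
    def move_up(cls):
        return "up"

# register() rebuilt Menu through the KeyHandler metaclass, so every marked
# method is now reachable from Menu.key_handler and dispatchable via handle_input.
print(sorted(Menu.key_handler))  # ['K', 'j', 'k']
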
"""simple docstring"""
class PrefixSum:
    def __init__(self , array ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self , start , end ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self , target_sum ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 153 | 1 |
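
Usage of the prefix-sum class fixed above: get_sum answers inclusive range queries in O(1), and contains_sum checks for a subarray with a given total in one pass over the prefix array:

ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(0, 3))     # 10 -- sum of the whole array
print(ps.get_sum(1, 2))     # 5  -- 2 + 3
print(ps.contains_sum(7))   # True -- the subarray [3, 4] sums to 7
print(ps.contains_sum(11))  # False
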
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase : str = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ChineseCLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 320 |
"""simple docstring"""
def find_min(arr ) -> int:
    '''simple docstring'''
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]  # sum j reachable without arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
| 320 | 1 |
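
A worked example for the partition DP above: the best split of [1, 6, 11, 5] is {1, 6, 5} against {11}, so the minimum difference is 12 - 11 = 1.

print(find_min([1, 6, 11, 5]))  # 1
print(find_min([3]))            # 3 -- the other side must stay empty
print(find_min([2, 2]))         # 0 -- a perfectly balanced split exists
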
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 156 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    '''simple docstring'''
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 296 | 0 |
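
Both rows above follow the same argparse subcommand pattern: each command registers itself on a subparser and stores a factory in args.func. A generic standalone sketch of the design:

from argparse import ArgumentParser, Namespace

class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers) -> None:
        parser = subparsers.add_parser("hello", help="print a greeting")
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=HelloCommand)

    def __init__(self, args: Namespace):
        self.name = args.name

    def run(self) -> None:
        print(f"hello {self.name}")

def main() -> None:
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    commands = parser.add_subparsers(help="demo-cli command helpers")
    HelloCommand.register_subcommand(commands)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    args.func(args).run()  # instantiate the selected command, then run it

if __name__ == "__main__":
    main()
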
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000 ,n_limit: int = 10 ):
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 ,(t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound ,outer_width - 1 ,2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 227 |
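
The Euler row above counts each lamina as t = outer^2 - hole^2, with the hole matching the outer square's parity, then keeps totals achievable in 1 to 10 ways. A direct brute force over the same squares, usable as a cross-check at small limits (illustrative only):

from collections import defaultdict

def brute_count(t_limit: int) -> int:
    count = defaultdict(int)
    outer = 3
    while 4 * outer - 4 <= t_limit:  # thinnest lamina possible for this outer square
        for hole in range(outer - 2, 0, -2):  # same parity, wall at least one tile thick
            tiles = outer * outer - hole * hole
            if tiles <= t_limit:
                count[tiles] += 1
        outer += 1
    return sum(1 for n in count.values() if 1 <= n <= 10)

assert brute_count(1000) == solution(1000)
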
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
a = KandinskyVaaPriorPipeline
a = ["prompt"]
a = ["prompt", "negative_prompt"]
a = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
a = False
@property
def A ( self : List[str]):
return 32
@property
def A ( self : List[Any]):
return 32
@property
def A ( self : Dict):
return self.time_input_dim
@property
def A ( self : Tuple):
return self.time_input_dim * 4
@property
def A ( self : Optional[int]):
return 100
@property
def A ( self : Dict):
_A : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def A ( self : Optional[Any]):
torch.manual_seed(0)
_A : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE)
@property
def A ( self : List[Any]):
torch.manual_seed(0)
_A : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
_A : Any = PriorTransformer(**SCREAMING_SNAKE_CASE)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_A : str = nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def A ( self : List[str]):
torch.manual_seed(0)
_A : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_A : Union[str, Any] = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE)
return model
@property
def A ( self : int):
_A : Optional[Any] = CLIPImageProcessor(
crop_size=224 , do_center_crop=SCREAMING_SNAKE_CASE , do_normalize=SCREAMING_SNAKE_CASE , do_resize=SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
def A ( self : Optional[Any]):
_A : Optional[int] = self.dummy_prior
_A : Dict = self.dummy_image_encoder
_A : Dict = self.dummy_text_encoder
_A : str = self.dummy_tokenizer
_A : Optional[Any] = self.dummy_image_processor
_A : Optional[Any] = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE , clip_sample_range=10.0 , )
_A : Dict = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def A ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str]=0):
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
_A : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE)
else:
_A : int = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
_A : List[Any] = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def A ( self : List[Any]):
_A : str = 'cpu'
_A : Tuple = self.get_dummy_components()
_A : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE)
_A : Any = pipe.to(SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
_A : Dict = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE))
_A : str = output.image_embeds
_A : Optional[int] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE) , return_dict=SCREAMING_SNAKE_CASE , )[0]
_A : Optional[int] = image[0, -10:]
_A : int = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_A : Dict = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def A ( self : Any):
_A : Tuple = torch_device == 'cpu'
_A : Optional[int] = True
_A : Tuple = False
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE , relax_max_difference=SCREAMING_SNAKE_CASE , test_mean_pixel_difference=SCREAMING_SNAKE_CASE , )
@skip_mps
def A ( self : int):
_A : Tuple = torch_device == 'cpu'
_A : Optional[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE , test_mean_pixel_difference=SCREAMING_SNAKE_CASE , )
| 227 | 1 |
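
One reusable idiom from the pipeline test above: building a deterministic generator that works on every backend, because torch.Generator cannot be constructed for the mps device. The pattern in isolation:

import torch

def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        # mps has no device-local Generator; seed and reuse the default CPU one
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
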
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model ):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
        model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key(name ):
    if "encoder.model" in name:
        name = name.replace("""encoder.model""" , """encoder""" )
    if "decoder.model" in name:
        name = name.replace("""decoder.model""" , """decoder""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
    if name.startswith("""encoder""" ):
        if "layers" in name:
            name = """encoder.""" + name
        if "attn.proj" in name:
            name = name.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in name and "mask" not in name:
            name = name.replace("""attn""" , """attention.self""" )
        if "norm1" in name:
            name = name.replace("""norm1""" , """layernorm_before""" )
        if "norm2" in name:
            name = name.replace("""norm2""" , """layernorm_after""" )
        if "mlp.fc1" in name:
            name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
        if "mlp.fc2" in name:
            name = name.replace("""mlp.fc2""" , """output.dense""" )
        if name == "encoder.norm.weight":
            name = """encoder.layernorm.weight"""
        if name == "encoder.norm.bias":
            name = """encoder.layernorm.bias"""
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    # load original model
    original_model = DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config , decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # verify results on scanned document
    dataset = load_dataset("""hf-internal-testing/example-documents""" )
    image = dataset["""test"""][0]["""image"""].convert("""RGB""" )
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor = DonutProcessor(image_processor , tokenizer )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        question = """When is the coffee break?"""
        task_prompt = task_prompt.replace("""{user_input}""" , question )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = """<s_rvlcdip>"""
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = """<s_cord>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = """s_cord-v2>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = """<s_zhtrainticket>"""
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = """hello world"""
    else:
        raise ValueError("""Model name not supported""" )
    prompt_tensors = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors="""pt""" )[
        """input_ids"""
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings , _ = model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values )
    last_hidden_state = model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
    # verify decoder hidden states
    original_logits = original_model(pixel_values , prompt_tensors , None ).logits
    logits = model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1E-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
        processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 228 |
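
The heart of the Donut weight conversion above is slicing timm's fused qkv projection back into separate query/key/value tensors along the output dimension. The slicing in isolation, with a made-up hidden size:

import torch

dim = 8  # hypothetical per-layer hidden size
qkv_weight = torch.randn(3 * dim, dim)  # fused projection as stored in timm-style checkpoints
qkv_bias = torch.randn(3 * dim)

query_w, key_w, value_w = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
query_b, key_b, value_b = qkv_bias[:dim], qkv_bias[dim : dim * 2], qkv_bias[-dim:]

# the three slices partition the fused tensor exactly, in q/k/v order
assert torch.equal(torch.cat([query_w, key_w, value_w]), qkv_weight)
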
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__UpperCamelCase : Optional[Any] = False
@skip_mps
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = StableDiffusionAttendAndExcitePipeline
UpperCamelCase__ = False
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__magic_name__ )
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__magic_name__ , )
a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
a = CLIPTextModel(__magic_name__ )
a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[int] , __magic_name__ :Optional[int]=0 ):
'''simple docstring'''
if str(__magic_name__ ).startswith("""mps""" ):
a = torch.manual_seed(__magic_name__ )
else:
a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a = a = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = """cpu"""
a = self.get_dummy_components()
a = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = self.get_dummy_inputs(__magic_name__ )
a = pipe(**__magic_name__ ).images
a = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
a = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__magic_name__ , 1E-3 )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@classmethod
def lowerCamelCase__ ( cls :int ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__magic_name__ )
@classmethod
def lowerCamelCase__ ( cls :Any ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = torch.manual_seed(51 )
a = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__magic_name__ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
a = """a painting of an elephant with glasses"""
a = [5, 7]
a = pipe(
prompt=__magic_name__ , token_indices=__magic_name__ , guidance_scale=7.5 , generator=__magic_name__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5E-1
| 228 | 1 |
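
The diffusion pipeline tests above (and the Kandinsky ones earlier) share one regression idiom: generate with fixed seeds, take a small fixed slice of the output image, and compare against values recorded from a known-good run. The bare pattern:

import numpy as np

image = np.zeros((1, 64, 64, 3))          # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]      # fixed 3x3 corner of the last channel
expected_slice = np.zeros(9)              # values captured from a trusted run
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
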
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _a ( unittest.TestCase ):
def _lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_snake_case = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" ,safety_checker=_SCREAMING_SNAKE_CASE ,cache_dir=_SCREAMING_SNAKE_CASE )
_snake_case = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE ,os.listdir(_SCREAMING_SNAKE_CASE )[0] ,"snapshots" ) )]
_snake_case = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class _a ( unittest.TestCase ):
def _lowercase ( self ) -> List[str]:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" ,safety_checker=_SCREAMING_SNAKE_CASE )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 4
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
_snake_case = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def _lowercase ( self ) -> Any:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="flax" ,safety_checker=_SCREAMING_SNAKE_CASE )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def _lowercase ( self ) -> str:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa ,safety_checker=_SCREAMING_SNAKE_CASE )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def _lowercase ( self ) -> Dict:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def _lowercase ( self ) -> Optional[Any]:
_snake_case = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="scaled_linear" ,set_alpha_to_one=_SCREAMING_SNAKE_CASE ,steps_offset=1 ,)
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa ,scheduler=_SCREAMING_SNAKE_CASE ,safety_checker=_SCREAMING_SNAKE_CASE ,)
_snake_case = scheduler.create_state()
_snake_case = scheduler_state
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def _lowercase ( self ) -> int:
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = jax.random.split(jax.random.PRNGKey(0 ) ,_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa ,safety_checker=_SCREAMING_SNAKE_CASE ,)
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_snake_case = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa ,safety_checker=_SCREAMING_SNAKE_CASE ,use_memory_efficient_attention=_SCREAMING_SNAKE_CASE ,)
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_snake_case = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 142 |
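
Every Flax test above prepares multi-device inputs the same way: replicate the weights, split the PRNG key per device, and shard the batch. That idiom in isolation (the params pytree and batch are hypothetical):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4,))}                                   # hypothetical pytree of weights
prompts = jnp.arange(jax.device_count() * 2).reshape(-1, 1)      # per-example batch

replicated_params = replicate(params)                            # adds a leading device axis to every leaf
rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())  # one PRNG key per device
sharded_prompts = shard(prompts)                                 # reshapes to (n_devices, batch_per_device, ...)

print(jax.tree_util.tree_map(lambda x: x.shape, replicated_params))
print(rngs.shape, sharded_prompts.shape)
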
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : List[Any] = """BridgeTowerImageProcessor"""
SCREAMING_SNAKE_CASE_ : List[Any] = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
    def __init__( self , image_processor , tokenizer ) -> Tuple:
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ) -> List[str]:
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ) -> List[str]:
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ) -> Any:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 142 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bridgetower'''] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 64 |
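
The bridgetower __init__ above defers heavy imports through _LazyModule. Plain Python gets the same effect with a module-level __getattr__ (PEP 562); a hedged sketch with made-up module paths:

# mypkg/__init__.py -- illustrative lazy-import shim
import importlib

_LAZY_ATTRS = {
    "BridgeTowerConfig": ".configuration_bridgetower",
    "BridgeTowerModel": ".modeling_bridgetower",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        # the submodule is imported only on first attribute access
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
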
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ):
"""simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ):
    """simple docstring"""
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = ViTMSNConfig()
_snake_case : Any = 10_00
_snake_case : Tuple = """datasets/huggingface/label-files"""
_snake_case : Dict = """imagenet-1k-id2label.json"""
_snake_case : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ ) , """r""" ) )
    _snake_case : Any = {int(k ): v for k, v in idalabel.items()}
_snake_case : List[Any] = idalabel
_snake_case : str = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_snake_case : Tuple = 3_84
_snake_case : Dict = 15_36
_snake_case : Tuple = 6
elif "l16" in checkpoint_url:
_snake_case : Any = 10_24
_snake_case : int = 40_96
_snake_case : str = 24
_snake_case : Optional[int] = 16
_snake_case : List[Any] = 0.1
elif "b4" in checkpoint_url:
_snake_case : Tuple = 4
elif "l7" in checkpoint_url:
_snake_case : int = 7
_snake_case : Dict = 10_24
_snake_case : Optional[Any] = 40_96
_snake_case : Any = 24
_snake_case : Union[str, Any] = 16
_snake_case : Optional[int] = 0.1
_snake_case : int = ViTMSNModel(snake_case__ )
_snake_case : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )["""target_encoder"""]
_snake_case : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
_snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    _snake_case : Tuple = Image.open(requests.get(snake_case__ , stream=True ).raw )
    _snake_case : str = ViTImageProcessor(
        size=config.image_size , image_mean=[0.485, 0.456, 0.406] , image_std=[0.229, 0.224, 0.225] )  # standard ImageNet mean/std; the original constants were dropped from this copy
_snake_case : Any = image_processor(images=snake_case__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_snake_case : int = model(**snake_case__ )
_snake_case : List[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_snake_case : Optional[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
_snake_case : str = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
_snake_case : Optional[int] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
_snake_case : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
_snake_case : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
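# Illustrative CLI invocation (assuming this script is saved as convert_vit_msn_to_pytorch.py;
# the flag names come from the argparse definitions above, the output folder is hypothetical):
# python convert_vit_msn_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#     --pytorch_dump_folder_path ./vit-msn-small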
| 64 | 1 |
def is_pangram( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ' , '' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord('a' )] = True
        elif char.isupper():
            flag[ord(char ) - ord('A' )] = True
    return all(flag )
def is_pangram_fastest( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
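# Illustrative checks (not part of the original snippet; all three functions agree):
# >>> is_pangram("Waltz, bad nymph, for quick jigs vex.")
# True
# >>> is_pangram_faster("My name is Unknown")
# False
# >>> is_pangram_fastest("The quick brown fox jumps over the lazy dog")
# True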
def benchmark() -> None:
    from timeit import timeit
    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()' , setup=setup ) )
    print(timeit('is_pangram_faster()' , setup=setup ) )
    print(timeit('is_pangram_fastest()' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 225 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class MvpTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
    @property
    def mask_token( self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token( self , value) -> None:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus( self , *args , **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None) -> Union[str, Any]:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
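# Illustrative usage (hypothetical call, not part of the original module):
# tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
# tokenizer("Hello world")["input_ids"]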
| 225 | 1 |
"""simple docstring"""
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
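# Why the default bounds suffice (explanatory note, not in the original):
# any base >= 10 yields base**power with more than `power` digits, and
# 9**22 has only 21 digits, so no power above 21 can qualify.
# Example: 9**5 = 59049 has exactly 5 digits, so it is counted.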
if __name__ == "__main__":
    print(f'{solution(10, 22) = }')
| 86 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request ):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 86 | 1 |
from __future__ import annotations
def generate_all_combinations( n: int , k: int ) -> list[list[int]]:
    '''simple docstring'''
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state( increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    '''simple docstring'''
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state( total_list: list[list[int]] ) -> None:
    '''simple docstring'''
    for i in total_list:
        print(*i )
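# Illustrative output (not part of the original): for n=4, k=2 the driver below
# prints the C(4, 2) = 6 combinations 1 2, 1 3, 1 4, 2 3, 2 4 and 3 4.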
if __name__ == "__main__":
_UpperCAmelCase : str = 4
_UpperCAmelCase : Tuple = 2
_UpperCAmelCase : Optional[int] = generate_all_combinations(n, k)
print_all_state(total_list)
| 200 |
# Lint as: python3
import itertools
import os
import re
_UpperCAmelCase : str = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
_UpperCAmelCase : Dict = re.compile(R"""([a-z\d])([A-Z])""")
_UpperCAmelCase : Dict = re.compile(R"""(?<!_)_(?!_)""")
_UpperCAmelCase : Tuple = re.compile(R"""(_{2,})""")
_UpperCAmelCase : Any = R"""^\w+(\.\w+)*$"""
_UpperCAmelCase : List[str] = R"""<>:/\|?*"""
def camelcase_to_snakecase( name ):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(r'\1_\2' , name )
    name = _lowercase_uppercase_re.sub(r'\1_\2' , name )
    return name.lower()
def snakecase_to_camelcase( name ):
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '' )
def filename_prefix_for_name( name ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name , split ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(F'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
    return F'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split( path , dataset_name , split , filetype_suffix=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(path , prefix )
    return F'''{filepath}*'''
def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
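# Illustrative behaviour (not part of the original module):
# camelcase_to_snakecase("SquadV2")             -> "squad_v2"
# filename_prefix_for_split("SquadV2", "train") -> "squad_v2-train"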
| 200 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : List[str] = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _UpperCamelCase ( UpperCAmelCase__ ):
UpperCAmelCase_ = 42
class _UpperCamelCase ( UpperCAmelCase__ ):
def __init__( self :str , lowerCamelCase :Any , lowerCamelCase :Optional[Any] , lowerCamelCase :Any , lowerCamelCase :List[str] , lowerCamelCase :str , ) -> Optional[Any]:
super().__init__()
self.register_modules(
prior=_SCREAMING_SNAKE_CASE , image_encoder=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , renderer=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :Dict , lowerCamelCase :List[Any] , lowerCamelCase :Dict , lowerCamelCase :str , lowerCamelCase :Any , lowerCamelCase :Dict ) -> int:
if latents is None:
UpperCAmelCase__ = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
UpperCAmelCase__ = latents.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Tuple=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase__ = torch.device(f'''cuda:{gpu_id}''' )
UpperCAmelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase_ ( self :Dict ) -> List[Any]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def UpperCAmelCase_ ( self :Dict , lowerCamelCase :Any , lowerCamelCase :str , lowerCamelCase :int , lowerCamelCase :int , ) -> Any:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase__ = torch.cat(_SCREAMING_SNAKE_CASE , axis=0 ) if image[0].ndim == 4 else torch.stack(_SCREAMING_SNAKE_CASE , axis=0 )
if not isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
UpperCAmelCase__ = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase__ = image.to(dtype=self.image_encoder.dtype , device=_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = self.image_encoder(_SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
UpperCAmelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase__ = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ = torch.zeros_like(_SCREAMING_SNAKE_CASE )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self :int , lowerCamelCase :Optional[int] , lowerCamelCase :Any = 1 , lowerCamelCase :Tuple = 25 , lowerCamelCase :Any = None , lowerCamelCase :Tuple = None , lowerCamelCase :str = 4.0 , lowerCamelCase :Dict = 64 , lowerCamelCase :int = "pil" , lowerCamelCase :Union[str, Any] = True , ) -> Optional[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCAmelCase__ = 1
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
UpperCAmelCase__ = image.shape[0]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase__ = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCAmelCase__ = self._execution_device
UpperCAmelCase__ = batch_size * num_images_per_prompt
UpperCAmelCase__ = guidance_scale > 1.0
UpperCAmelCase__ = self._encode_image(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# prior
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = self.scheduler.timesteps
UpperCAmelCase__ = self.prior.config.num_embeddings
UpperCAmelCase__ = self.prior.config.embedding_dim
UpperCAmelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase__ = latents.reshape(latents.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = self.prior(
_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , proj_embedding=_SCREAMING_SNAKE_CASE , ).predicted_image_embedding
# remove the variance
UpperCAmelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase__ = self.scheduler.step(
_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = []
for i, latent in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = self.renderer.decode(
latent[None, :] , _SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = torch.stack(_SCREAMING_SNAKE_CASE )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
UpperCAmelCase__ = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase__ = [self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
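# Note (added for clarity): the guidance step in the denoising loop above implements
# standard classifier-free guidance, pred = uncond + scale * (cond - uncond), which
# is active whenever guidance_scale > 1.0.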
| 169 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result: Dataset , args: Dict[str, str] ):
    log_outputs = args.log_outputs
    dataset_id = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("""wer""" )
    cer = load_metric("""cer""" )
    # compute metrics
    wer_result = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
    cer_result = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
    # print & log results
    result_str = F"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )
    with open(F"{dataset_id}_eval_results.txt" , """w""" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F"log_{dataset_id}_predictions.txt"
        target_file = F"log_{dataset_id}_targets.txt"
        with open(pred_file , """w""" ) as p, open(target_file , """w""" ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F"{i}" + """\n""" )
                p.write(batch["""prediction"""] + """\n""" )
                t.write(F"{i}" + """\n""" )
                t.write(batch["""target"""] + """\n""" )
            result.map(write_to_file , with_indices=True )
def normalize_text( text: str ) -> str:
    chars_to_ignore_regex = """[,?.!\-\;\:\"“%‘”�—’…–]"""  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , """""" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["""\n\n""", """\n""", """   """, """  """]
    for t in token_sequences_to_ignore:
        text = """ """.join(text.split(t ) )
    return text
def main( args ):
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("""audio""" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["""prediction"""] = prediction["""text"""]
        batch["""target"""] = normalize_text(batch["""sentence"""] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
A: List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
A: Union[str, Any] = parser.parse_args()
main(args)
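# Illustrative CLI invocation (assuming this script is saved as eval.py; the model
# and dataset identifiers below are placeholders):
# python eval.py --model_id <hub-model-id> --dataset mozilla-foundation/common_voice_8_0 \
#     --config en --split test --log_outputs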
| 109 | 0 |
def molarity_to_normality(nfactor: int , moles: float , volume: float ) -> float:
    """simple docstring"""
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume: float , moles: float , temperature: float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume(pressure: float , moles: float , temperature: float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure: float , moles: float , volume: float ) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0821 * moles) ) )
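# The ideal-gas helpers above are rearrangements of PV = nRT with
# R = 0.0821 L*atm/(mol*K). Worked check (illustrative): 2 mol at 300 K in a
# 10 L vessel gives P = (2 * 0.0821 * 300) / 10 = 4.926, rounded to 5 atm.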
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
from __future__ import annotations
def resistor_parallel(resistors: list[float] ) -> float:
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float] ) -> float:
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'Resistor at index {index} has a negative value!'
            raise ValueError(msg )
        index += 1
    return sum_r
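# The two helpers implement 1/R_total = sum(1/R_i) (parallel) and
# R_total = sum(R_i) (series). Illustrative check: resistor_parallel([5, 10, 10])
# = 1 / (0.2 + 0.1 + 0.1) = 2.5 ohms; resistor_series([5, 10, 10]) = 25 ohms.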
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module( module ) -> None:
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_pil( img ) -> None:
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
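# Typical flow (illustrative, not part of the original utilities):
# device = get_device()
# model.to(device)
# freeze_module(model)  # disable gradients for all parameters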
| 336 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__a = logging.get_logger(__name__)
__a = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]:
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if config is None:
assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
lowercase_ = self.model.config
else:
lowercase_ = config
lowercase_ = data_args
lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
''' padding..''' )
if self.args.label_smoothing == 0:
lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowercase_ = label_smoothed_nll_loss
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
if self.optimizer is None:
lowercase_ = ['''bias''', '''LayerNorm.weight''']
lowercase_ = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
lowercase_ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowercase_ = Adafactor
lowercase_ = {'''scale_parameter''': False, '''relative_step''': False}
else:
lowercase_ = AdamW
lowercase_ = {
                '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
'''eps''': self.args.adam_epsilon,
}
lowercase_ = self.args.learning_rate
if self.sharded_ddp:
lowercase_ = OSS(
params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.lr_scheduler is None:
lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
lowercase_ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowercase_ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowercase_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
lowercase_ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
return scheduler
def _lowercase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowercase_ , lowercase_ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2]
else:
# compute label smoothed loss
lowercase_ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
lowercase_ , lowercase_ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]:
lowercase_ = inputs.pop('''labels''' )
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return loss
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : Dict[str, Union[torch.Tensor, Any]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
lowercase_ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowercase_ = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **SCREAMING_SNAKE_CASE_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
lowercase_ = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
lowercase_ , lowercase_ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowercase_ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowercase_ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
# If PAD token is not defined at least EOS token has to be defined
lowercase_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f''' padded to `max_length`={max_length}''' )
lowercase_ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowercase_ = tensor
return padded_tensor
| 30 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class MarkupLMConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''markuplm'''
    def __init__( self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1_024, tag_pad_id=216, subs_pad_id=1_001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
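# Illustrative usage (not in the original file):
# config = MarkupLMConfig()              # roughly the "microsoft/markuplm-base" defaults listed above
# config = MarkupLMConfig(max_depth=100) # override any field by keyword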
| 356 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
UpperCamelCase_ = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
UpperCamelCase_ = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
UpperCamelCase_ = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'], reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
], )
    def _compute( self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False ):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions ):
            score = scorer.score(ref, pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 246 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''mgp-str'''
    def __init__(self , image_size=[32, 1_28] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=5_02_57 , num_wordpiece_labels=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.02 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 25 |
"""simple docstring"""
def sum_digits(num: int ) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100 ) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
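# Background (added note): the loop iterates the continued fraction of e,
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]. Per the Project Euler 65 statement, the
# 10th convergent is 1457/536, so solution(10) returns 1 + 4 + 5 + 7 = 17.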
if __name__ == "__main__":
    print(f'{solution() = }')
| 86 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ):
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
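# With no gates applied before measurement, the qubit remains in |0>, so the
# counts are expected to be {'0': 1000} for shots=1000 (illustrative).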
if __name__ == "__main__":
print(F"Total count for various states are: {single_qubit_measure(1, 1)}")
| 355 |
"""simple docstring"""
class _a :
    def __init__( self, set_counts : list ) -> None:
        '''simple docstring'''
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self, src: int, dst: int ) -> bool:
        '''simple docstring'''
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size )
        return True
    def get_parent( self, disj_set: int ) -> int:
        '''simple docstring'''
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )  # path compression
        return self.parents[disj_set]
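# Minimal usage sketch (hypothetical values, not part of the original):
# sets = _a([1, 1, 1])   # three singleton sets of size 1
# sets.merge(0, 1)       # -> True, max_set becomes 2
# sets.merge(1, 2)       # -> True, max_set becomes 3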
| 128 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class UpperCAmelCase_ :
lowercase__ = BlenderbotConfig
lowercase__ = {}
lowercase__ = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def __magic_name__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
return config, inputs_dict
def __magic_name__ ( self : List[Any] , snake_case_ : List[Any] , snake_case_ : int ) -> str:
'''simple docstring'''
A__ = TFBlenderbotModel(config=snake_case_ ).get_decoder()
A__ = inputs_dict["input_ids"]
A__ = input_ids[:1, :]
A__ = inputs_dict["attention_mask"][:1, :]
A__ = inputs_dict["head_mask"]
A__ = 1
# first forward pass
A__ = model(snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , use_cache=snake_case_ )
A__, A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(snake_case_ , attention_mask=snake_case_ )[0]
A__ = model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1e-3 )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> int:
if attention_mask is None:
        A__ = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
A__ = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
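# Note (added for clarity): when masks are omitted, the helper above defaults the
# attention mask to "not pad" and every head mask to all-ones (no heads pruned).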
@require_tf
class UpperCAmelCase_ ( A_, A_, unittest.TestCase ):
lowercase__ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowercase__ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowercase__ = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
def __magic_name__ ( self : List[str] ) -> int:
'''simple docstring'''
A__ = TFBlenderbotModelTester(self )
A__ = ConfigTester(self , config_class=snake_case_ )
def __magic_name__ ( self : int ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
@require_tokenizers
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
lowercase__ = ['''My friends are cool but they eat too many carbs.''']
lowercase__ = '''facebook/blenderbot-400M-distill'''
@cached_property
def __magic_name__ ( self : str ) -> int:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __magic_name__ ( self : Tuple ) -> int:
'''simple docstring'''
A__ = self.tokenizer(self.src_text , return_tensors="tf" )
A__ = self.model.generate(
model_inputs.input_ids , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 247 |
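The prepare_blenderbot_inputs_dict helper above derives both masks from the pad token: encoder positions are masked where they equal config.pad_token_id, and the decoder mask always keeps the first position (the decoder start token) visible. A minimal NumPy sketch of the same masking, independent of TensorFlow; PAD_TOKEN_ID = 0 is an assumption for illustration only.

import numpy as np

PAD_TOKEN_ID = 0  # hypothetical pad id, for this sketch only

def make_attention_mask(input_ids):
    # 1 for real tokens, 0 for padding -- mirrors tf.math.not_equal above
    return (input_ids != PAD_TOKEN_ID).astype(np.int8)

def make_decoder_attention_mask(decoder_input_ids):
    # the first position (decoder start token) is always attended to
    first = np.ones_like(decoder_input_ids[:, :1], dtype=np.int8)
    rest = (decoder_input_ids[:, 1:] != PAD_TOKEN_ID).astype(np.int8)
    return np.concatenate([first, rest], axis=-1)

batch = np.array([[0, 7, 9, 0, 0]])  # the leading 0 plays the decoder start token
assert make_attention_mask(batch).tolist() == [[0, 1, 1, 0, 0]]
assert make_decoder_attention_mask(batch).tolist() == [[1, 1, 1, 0, 0]]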
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase_ ( A_, A_, unittest.TestCase ):
lowercase__ = IFPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ ( self : Dict ) -> List[str]:
'''simple docstring'''
return self._get_dummy_components()
def __magic_name__ ( self : int , snake_case_ : Tuple , snake_case_ : Union[str, Any]=0 ) -> Optional[int]:
'''simple docstring'''
if str(snake_case_ ).startswith("mps" ):
A__ = torch.manual_seed(snake_case_ )
else:
A__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self : int ) -> str:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __magic_name__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __magic_name__ ( self : List[str] ) -> Dict:
'''simple docstring'''
self._test_save_load_local()
def __magic_name__ ( self : List[str] ) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __magic_name__ ( self : Any ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : Optional[int] ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
A__ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
A__ = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case_ , tokenizer=snake_case_ )
# precompute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
A__, A__ = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
A__ = None
A__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
A__ = IFImgaImgPipeline(**pipe_a.components )
A__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
A__ = IFInpaintingPipeline(**pipe_a.components )
A__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : Any , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : str ) -> Union[str, Any]:
'''simple docstring'''
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (64, 64, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def __magic_name__ ( self : Dict , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
_start_torch_memory_measurement()
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (64, 64, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_start_torch_memory_measurement()
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case_ )
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (64, 64, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case_ )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 247 | 1 |
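The _start_torch_memory_measurement helper above resets the CUDA allocator's statistics so that each pipeline stage's peak can be asserted with torch.cuda.max_memory_allocated(). A small sketch of that measurement pattern, guarded so it is a no-op on CPU-only machines:

import torch

def measure_peak_cuda_memory(fn):
    # reset peak stats, run the workload, report peak bytes allocated
    if not torch.cuda.is_available():
        return None
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    torch.cuda.synchronize()
    return torch.cuda.max_memory_allocated()

peak = measure_peak_cuda_memory(lambda: torch.randn(1024, 1024, device="cuda").sum())
if peak is not None:
    print(f"peak CUDA memory: {peak / 2**20:.1f} MiB")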
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354 |
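The file above registers Lxmert's submodules with transformers' _LazyModule, so the heavy torch/TF modules are only imported when an attribute is first accessed. A minimal sketch of the same idea using PEP 562's module-level __getattr__; heavy_module and HeavyClass are placeholder names, not part of the original file.

# lazy_pkg/__init__.py -- minimal lazy-import sketch (PEP 562)
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}  # placeholder names
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # called only when `name` is not found normally, i.e. on first access
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")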
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test that 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 0 |
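The TvltFeatureExtractionTester above builds batches whose waveform lengths grow from min_seq_length to max_seq_length in equal steps. A NumPy sketch of producing such a ragged batch and zero-padding it to a rectangular array; the lengths are illustrative, not the tester's actual configuration.

import numpy as np

rng = np.random.default_rng(0)
lengths = list(range(400, 1400, 200))        # 400, 600, 800, 1000, 1200
speech_inputs = [rng.random(n) for n in lengths]

max_len = max(map(len, speech_inputs))
padded = np.zeros((len(speech_inputs), max_len))
for i, x in enumerate(speech_inputs):
    padded[i, : len(x)] = x                  # left-aligned, zeros pad the right
assert padded.shape == (5, 1200)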
def __lowerCAmelCase ( a__ ) -> str:
return "".join([hex(a__ )[2:].zfill(2 ).upper() for byte in list(a__ )] )
def __lowerCAmelCase ( a__ ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(a__ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(a__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(a__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
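Both helpers above are named identically after obfuscation, so they cannot be called as written; the sketch below cross-checks the same encode/decode logic against the standard library's Base16 codec (RFC 4648, the successor of RFC 3548):

import base64

data = b"Hello World!"
encoded = base64.b16encode(data)                       # b'48656C6C6F20576F726C6421'
assert base64.b16decode(encoded) == data

# hand-rolled equivalents of the two helpers above
hand_encoded = "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
assert hand_encoded == encoded.decode()
hand_decoded = bytes(int(hand_encoded[i : i + 2], 16) for i in range(0, len(hand_encoded), 2))
assert hand_decoded == data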
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1
lowercase : List[Any] = n
lowercase : List[Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # adjacency matrix for weight
lowercase : Union[str, Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = w
def __lowerCamelCase ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.dp[u][v]
if __name__ == "__main__":
__a = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
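A de-obfuscated sketch of the same Floyd-Warshall recurrence, checked against the example graph above (the shortest paths are 1->3->4 with cost 11 and 0->2->3 with cost 16):

import math

def floyd_warshall(n, edges):
    # dist[i][j] holds the best known distance from i to j
    dist = [[math.inf] * n for _ in range(n)]
    for i in range(n):
        dist[i][i] = 0
    for u, v, w in edges:
        dist[u][v] = min(dist[u][v], w)
    for k in range(n):                        # allow k as an intermediate node
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

edges = [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10), (3, 1, 2),
         (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]
dist = floyd_warshall(5, edges)
assert dist[1][4] == 11 and dist[0][3] == 16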
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Tuple = logging.get_logger(__name__)
def UpperCAmelCase_ (_lowerCAmelCase : List[str] , _lowerCAmelCase : int=False ):
__UpperCamelCase : int = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__UpperCamelCase : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def UpperCAmelCase_ (_lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__UpperCamelCase : str = ""
else:
__UpperCamelCase : Optional[Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__UpperCamelCase : Dict = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
__UpperCamelCase : int = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
__UpperCamelCase : Tuple = in_proj_bias[: config.hidden_size]
__UpperCamelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__UpperCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
__UpperCamelCase : Tuple = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_ (_lowerCAmelCase : str ):
__UpperCamelCase : Tuple = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase_ (_lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] ):
__UpperCamelCase : Dict = dct.pop(_lowerCAmelCase )
__UpperCamelCase : int = val
def UpperCAmelCase_ ():
__UpperCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCamelCase : int = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ (_lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]=False ):
__UpperCamelCase : int = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_lowerCAmelCase , )
__UpperCamelCase : List[Any] = ViTHybridConfig(backbone_config=_lowerCAmelCase , image_size=3_84 , num_labels=10_00 )
__UpperCamelCase : Tuple = False
# load original model from timm
__UpperCamelCase : Any = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__UpperCamelCase : Tuple = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
__UpperCamelCase : int = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : int = "huggingface/label-files"
__UpperCamelCase : str = "imagenet-1k-id2label.json"
__UpperCamelCase : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
__UpperCamelCase : Tuple = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__UpperCamelCase : Dict = idalabel
__UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
__UpperCamelCase : Union[str, Any] = ViTHybridModel(_lowerCAmelCase ).eval()
else:
__UpperCamelCase : Optional[Any] = ViTHybridForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# create image processor
__UpperCamelCase : str = create_transform(**resolve_data_config({} , model=_lowerCAmelCase ) )
__UpperCamelCase : Tuple = transform.transforms
__UpperCamelCase : Tuple = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
__UpperCamelCase : int = ViTHybridImageProcessor(
do_resize=_lowerCAmelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCAmelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__UpperCamelCase : Tuple = prepare_img()
__UpperCamelCase : List[Any] = transform(_lowerCAmelCase ).unsqueeze(0 )
__UpperCamelCase : List[str] = processor(_lowerCAmelCase , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
# verify logits
with torch.no_grad():
__UpperCamelCase : List[Any] = model(_lowerCAmelCase )
__UpperCamelCase : List[str] = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
__UpperCamelCase : List[str] = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
__UpperCamelCase : List[str] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
lowercase : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub) | 171 |
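The read_in_q_k_v function above slices timm's fused attention projection (one (3*hidden, hidden) matrix plus one (3*hidden,) bias) into separate query/key/value tensors for the HF layout. A minimal torch sketch of that split; hidden_size=4 is arbitrary.

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv, timm layout
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]                        # rows 0..h-1
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]       # rows h..2h-1
v_w = in_proj_weight[-hidden_size:, :]                       # rows 2h..3h-1
q_b, k_b, v_b = torch.split(in_proj_bias, hidden_size)

assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), in_proj_bias)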
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers | 171 | 1 |
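Among the utilities re-exported above, find_executable_batch_size (from .memory) retries a function with a halved batch size whenever it raises a CUDA out-of-memory error. A usage sketch under that understanding of the API; the training body is a placeholder.

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # build dataloaders and run the loop with `batch_size`;
    # on CUDA OOM the decorator halves batch_size and calls train() again
    print(f"training with batch_size={batch_size}")

train()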
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> Optional[int]:
__a = sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda lowerCAmelCase__ : x[0] / x[1] , reverse=__SCREAMING_SNAKE_CASE )
__a = [i[0] for i in r], [i[1] for i in r]
__a = list(accumulate(__SCREAMING_SNAKE_CASE ) )
__a = bisect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
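The obfuscation above collapsed the intermediate names (r, acc, k), so the function is not runnable as written; here is a readable sketch of the same greedy fractional knapsack: sort items by value density, take whole items while they fit, then a fraction of the next. The classic instance values=[60, 100, 120], weights=[10, 20, 30], capacity=50 yields 240.

def frac_knapsack(values, weights, capacity):
    # greedy on value-per-weight; only the last picked item may be split
    items = sorted(zip(values, weights), key=lambda vw: vw[0] / vw[1], reverse=True)
    total = 0.0
    for value, weight in items:
        if capacity >= weight:
            total += value
            capacity -= weight
        else:
            total += value * capacity / weight
            break
    return total

assert frac_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0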
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_lowercase : Optional[int] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
_lowercase : str = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase_ : Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : str = 1
lowercase_ : str = len(self.sp_model )
lowercase_ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : Optional[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : Optional[Any] = src_lang if src_lang is not None else '''en_XX'''
lowercase_ : str = self.lang_code_to_id[self._src_lang]
lowercase_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.__dict__.copy()
lowercase_ : Dict = None
lowercase_ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Dict = {}
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = [1] * len(self.prefix_tokens )
lowercase_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Optional[Any] = src_lang
lowercase_ : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Any = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = src_lang
lowercase_ : int = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = self.lang_code_to_id[src_lang]
lowercase_ : Optional[Any] = []
lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.lang_code_to_id[lang]
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
| 93 | 0 |
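The tokenizer above pins four specials at the front and shifts every SentencePiece id by fairseq_offset=1, exactly as laid out in the alignment table in its comments. A tiny self-contained sketch of that id mapping; the toy spm vocab is illustrative.

FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1

# toy SentencePiece vocab: id 0 is <unk>, then <s>, </s>, and the first real pieces
spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4, "▁": 5}

def token_to_id(token):
    if token in FAIRSEQ_SPECIALS:
        return FAIRSEQ_SPECIALS[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 means unknown -> fall back to the fairseq <unk> id
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIALS["<unk>"]

assert token_to_id("<pad>") == 1
assert token_to_id(",") == 4      # spm id 3 + offset 1, matching the table above
assert token_to_id("???") == 3    # unknown piece maps to <unk>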
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase_ = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Tuple=False ) -> Optional[int]:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = create_model(
"HTSAT-tiny" , "roberta" , __A , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=__A , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def SCREAMING_SNAKE_CASE_ ( __A : Dict ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = R".*sequential.(\d+).*"
_SCREAMING_SNAKE_CASE = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_SCREAMING_SNAKE_CASE = key.replace(__A , __A )
if re.match(__A , __A ):
# replace sequential layers with list
_SCREAMING_SNAKE_CASE = re.match(__A , __A ).group(1 )
_SCREAMING_SNAKE_CASE = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(__A )//3}.linear.""" )
elif re.match(__A , __A ):
_SCREAMING_SNAKE_CASE = int(re.match(__A , __A ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_SCREAMING_SNAKE_CASE = 1 if projecton_layer == 0 else 2
_SCREAMING_SNAKE_CASE = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_SCREAMING_SNAKE_CASE = value
_SCREAMING_SNAKE_CASE = mixed_qkv.size(0 ) // 3
_SCREAMING_SNAKE_CASE = mixed_qkv[:qkv_dim]
_SCREAMING_SNAKE_CASE = mixed_qkv[qkv_dim : qkv_dim * 2]
_SCREAMING_SNAKE_CASE = mixed_qkv[qkv_dim * 2 :]
_SCREAMING_SNAKE_CASE = query_layer
_SCREAMING_SNAKE_CASE = key_layer
_SCREAMING_SNAKE_CASE = value_layer
else:
_SCREAMING_SNAKE_CASE = value
return model_state_dict
def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : Optional[Any] , __A : Union[str, Any] , __A : Optional[Any]=False ) -> int:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = init_clap(__A , enable_fusion=__A )
clap_model.eval()
_SCREAMING_SNAKE_CASE = clap_model.state_dict()
_SCREAMING_SNAKE_CASE = rename_state_dict(__A )
_SCREAMING_SNAKE_CASE = ClapConfig()
_SCREAMING_SNAKE_CASE = enable_fusion
_SCREAMING_SNAKE_CASE = ClapModel(__A )
# ignore the spectrogram embedding layer
model.load_state_dict(__A , strict=__A )
model.save_pretrained(__A )
transformers_config.save_pretrained(__A )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowerCamelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 111 |
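The rename_state_dict function above rewrites `sequential.{i}.` into `layers.{i // 3}.linear.`, apparently because each logical projection layer in the original CLAP checkpoint is an nn.Sequential of three modules (hence the // 3). A standalone sketch of that rewrite:

import re

SEQUENTIAL_RE = re.compile(r".*sequential.(\d+).*")

def rename_sequential(key):
    match = SEQUENTIAL_RE.match(key)
    if match is None:
        return key
    layer = int(match.group(1))
    return key.replace(f"sequential.{layer}.", f"layers.{layer // 3}.linear.")

assert rename_sequential("text_projection.sequential.0.weight") == "text_projection.layers.0.linear.weight"
assert rename_sequential("text_projection.sequential.3.weight") == "text_projection.layers.1.linear.weight"
assert rename_sequential("text_model.embeddings.weight") == "text_model.embeddings.weight"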
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowercase_ :
"""simple docstring"""
def lowerCAmelCase_ ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ):
"""simple docstring"""
return None
class lowercase_ :
"""simple docstring"""
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
return None
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "tf" , 1_2 , **__lowerCamelCase )
@require_torch
@slow
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "pt" , 1_2 , **__lowerCamelCase )
@require_torch
@slow
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
from transformers import BertModel
_SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__lowerCamelCase ) )
vocab_file.flush()
_SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) )
model.save_pretrained(__lowerCamelCase )
self._test_export(__lowerCamelCase , "pt" , 1_2 , __lowerCamelCase )
@require_tf
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 1_2 , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 1_2 , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : int ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
_SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
return path
except Exception as e:
self.fail(__lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
from transformers import BertModel
_SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
_SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
from transformers import TFBertModel
_SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
_SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" )
def lowerCAmelCase_ ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"]
_SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly one arg (everything before the non-provided "some_other_args")
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 111 | 1 |
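The tests above exercise the legacy transformers.convert_graph_to_onnx helpers end to end. A minimal sketch of driving the same export-then-quantize flow directly, assuming that module is available in the installed transformers version; the model name and output path are illustrative only:

from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize

# Export a PyTorch checkpoint to ONNX with opset 12, mirroring _test_export above.
onnx_path = Path("onnx/bert-base-cased.onnx")
convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)

# Dynamic quantization writes a second file and returns its path; the size
# comparison is exactly what the quantization tests above assert.
quantized_path = quantize(onnx_path)
assert quantized_path.stat().st_size < onnx_path.stat().st_size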
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 64 |
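The row above is the library's standard lazy-import layout: a plain dict maps each submodule to its exported symbols, and at import time the module object is swapped for a _LazyModule that only loads a submodule when one of its symbols is first accessed. A minimal self-contained sketch of that mechanism, using the standard library only (the module and symbol names in the demo are mine):

import importlib
import types

class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their symbols is touched."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # symbol name -> submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = self._symbol_to_module[symbol]  # real code raises AttributeError on a miss
        value = getattr(importlib.import_module(submodule), symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value

lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # json is only imported on this first access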
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
A_ = TypeVar('''T''')
A_ = Union[List[T], Tuple[T, ...]]
A_ = Union[T, List[T], Dict[str, T]]
A_ = Union[str, bytes, os.PathLike]
| 64 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowercase__ ( __UpperCamelCase = "https://www.worldometers.info/coronavirus" )-> dict:
UpperCamelCase = BeautifulSoup(requests.get(__UpperCamelCase ).text , """html.parser""" )
UpperCamelCase = soup.findAll("""h1""" )
UpperCamelCase = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__UpperCamelCase , __UpperCamelCase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in lowercase__().items():
print(f'{key}\n{value}\n')
| 350 |
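The scraper above relies on positional pairing: the n-th heading is matched with the n-th counter node. The same zip-based pairing on an inline HTML fragment, so the sketch runs without network access (bs4 must be installed; the markup is invented for illustration):

from bs4 import BeautifulSoup

html = (
    '<h1>Cases</h1><div class="maincounter-number">100</div>'
    '<h1>Deaths</h1><div class="maincounter-number">5</div>'
)
soup = BeautifulSoup(html, "html.parser")
keys = soup.find_all("h1")
values = soup.find_all("div", {"class": "maincounter-number"})
# Positional pairing, exactly as in the dict comprehension above.
print({k.text.strip(): v.text.strip() for k, v in zip(keys, values)})
# {'Cases': '100', 'Deaths': '5'}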
'''simple docstring'''
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> list:
UpperCamelCase = word.split()
def justify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
UpperCamelCase = max_width - width
UpperCamelCase = len(__UpperCamelCase )
if len(__UpperCamelCase ) == 1:
# if there is only one word on this line,
# pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
UpperCamelCase = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
UpperCamelCase = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
UpperCamelCase = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__UpperCamelCase ):
num_spaces_between_words_list[i] += 1
UpperCamelCase = []
for i in range(__UpperCamelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__UpperCamelCase )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = 0
for word in words:
if width + len(__UpperCamelCase ) + len(__UpperCamelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__UpperCamelCase )
width += len(__UpperCamelCase )
else:
# justify the line and add it to result
answer.append(justify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
# reset new line and new width
UpperCamelCase ,UpperCamelCase = [word], len(__UpperCamelCase )
UpperCamelCase = max_width - width - len(__UpperCamelCase )
answer.append(""" """.join(__UpperCamelCase ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 183 | 0 |
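The trickiest step in the justifier above is handing out the leftover spaces round-robin from the leftmost gap. That step in isolation, with the placeholder names replaced by descriptive ones of my own:

line = ["example", "of", "text"]
max_width, width = 16, len("exampleoftext")   # 13 characters of words
gaps = len(line) - 1                          # 2 gaps between words
spaces = max_width - width                    # 3 spaces to distribute
per_gap = [spaces // gaps] * gaps             # [1, 1]
for i in range(spaces % gaps):                # leftover space goes to the left gap
    per_gap[i] += 1                           # -> [2, 1]
justified = "".join(w + " " * s for w, s in zip(line, per_gap)) + line[-1]
print(repr(justified))  # 'example  of text' (exactly 16 characters)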
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCAmelCase_ : Optional[int] = '<<<<<<< This should probably be modified because it mentions: '
UpperCAmelCase_ : Tuple = '=======\n>>>>>>>\n'
UpperCAmelCase_ : Tuple = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
UpperCAmelCase_ : List[str] = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def SCREAMING_SNAKE_CASE_ ( __A : Namespace ) -> Optional[Any]:
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
@staticmethod
def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> int:
a_ : Tuple = parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , *SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
a_ : List[str] = get_logger('datasets-cli/converting' )
a_ : Union[str, Any] = tfds_path
a_ : Any = datasets_directory
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
if os.path.isdir(self._tfds_path ):
a_ : Dict = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a_ : Tuple = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
a_ : Dict = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
a_ : Optional[Any] = []
a_ : Any = []
a_ : Union[str, Any] = {}
if os.path.isdir(self._tfds_path ):
a_ : Any = os.listdir(SCREAMING_SNAKE_CASE__ )
else:
a_ : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
a_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not os.path.isfile(SCREAMING_SNAKE_CASE__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as f:
a_ : Union[str, Any] = f.readlines()
a_ : List[str] = []
a_ : Optional[int] = False
a_ : List[str] = False
a_ : List[str] = []
for line in lines:
a_ : str = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a_ : Optional[int] = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
a_ : Optional[int] = ''
continue
elif "from absl import logging" in out_line:
a_ : Dict = 'from datasets import logging\n'
elif "getLogger" in out_line:
a_ : Optional[Any] = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a_ : List[str] = True
a_ : int = list(filter(lambda SCREAMING_SNAKE_CASE__ : e in out_line , SCREAMING_SNAKE_CASE__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(SCREAMING_SNAKE_CASE__ ) + '\n' )
out_lines.append(SCREAMING_SNAKE_CASE__ )
out_lines.append(SCREAMING_SNAKE_CASE__ )
continue
else:
for pattern, replacement in TO_CONVERT:
a_ : Dict = re.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
a_ : str = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , SCREAMING_SNAKE_CASE__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
a_ : Optional[int] = 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a_ : Dict = True
out_lines.append(SCREAMING_SNAKE_CASE__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a_ : Any = f_name.replace('.py' , '' )
a_ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Any = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(SCREAMING_SNAKE_CASE__ )
if needs_manual_update:
with_manual_update.append(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as f:
f.writelines(SCREAMING_SNAKE_CASE__ )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
a_ : List[Any] = os.path.basename(SCREAMING_SNAKE_CASE__ )
a_ : Dict = imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 32 |
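The conversion is driven by the ordered (pattern, replacement) pairs in TO_CONVERT, applied line by line. A quick standalone check of how one source line is rewritten (the input line is invented; the three rules are a subset of the list above, kept in their original order because the catch-all tfds\. rule must run last):

import re

rules = [
    (r"tfds\.core", r"datasets"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.", r"datasets."),
]
line = "text = tfds.features.Text()"
for pattern, replacement in rules:
    line = re.sub(pattern, replacement, line)
print(line)  # text = datasets.Value('string')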
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowercase_ ( ) -> Optional[int]:
lowerCAmelCase__ : Dict = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
lowerCAmelCase__ : int = Dataset.from_dict(__UpperCAmelCase )
return dataset
class _lowerCamelCase ( a_ ):
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Dict = get_dataset()
lowerCAmelCase__ : Optional[int] = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = get_dataset()
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , UpperCamelCase )
| 242 | 0 |
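make_duplicate_clusters groups near-duplicate files by MinHash-approximated Jaccard similarity over their tokens. With the degenerate fixtures above, exact Jaccard already shows why the first two rows cluster at threshold 0.85 while the third stays alone; a sketch without the MinHash approximation:

def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

doc_1, doc_2, doc_3 = "a " * 20, "a " * 30, "b " * 7
print(jaccard(doc_1, doc_2))  # 1.0 -> clustered together
print(jaccard(doc_1, doc_3))  # 0.0 -> kept separate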
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _UpperCamelCase (a__ :Union[dict, list, tuple, torch.Tensor] ):
"""simple docstring"""
UpperCamelCase__ = []
if isinstance(a__ , a__ ):
for v in tree.values():
shapes.extend(_fetch_dims(a__ ) )
elif isinstance(a__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(a__ ) )
elif isinstance(a__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def _UpperCamelCase (a__ :int , a__ :Tuple[int, ...] ):
"""simple docstring"""
UpperCamelCase__ = []
for d in reversed(a__ ):
idx.append(flat_idx % d )
UpperCamelCase__ = flat_idx // d
return tuple(reversed(a__ ) )
@torch.jit.ignore
def _UpperCamelCase (a__ :Sequence[int] , a__ :Sequence[int] , a__ :Sequence[int] , a__ :Optional[Sequence[bool]] = None , a__ :Optional[Sequence[bool]] = None , ):
"""simple docstring"""
def reduce_edge_list(a__ :List[bool] ) -> None:
UpperCamelCase__ = True
for i in range(len(a__ ) ):
UpperCamelCase__ = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ = [s == 0 for s in start]
reduce_edge_list(a__ )
if end_edges is None:
UpperCamelCase__ = [e == (d - 1) for e, d in zip(a__ , a__ )]
reduce_edge_list(a__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(a__ ) == 0:
return [()]
elif len(a__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCamelCase__ = []
UpperCamelCase__ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(a__ , a__ ):
if s == e:
path_list.append(slice(a__ , s + 1 ) )
else:
break
UpperCamelCase__ = tuple(a__ )
UpperCamelCase__ = len(a__ )
# start == end, and we're done
if divergence_idx == len(a__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ = start[divergence_idx]
return tuple(
path + (slice(a__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ = end[divergence_idx]
return tuple(
path + (slice(a__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCamelCase__ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _UpperCamelCase (a__ :torch.Tensor , a__ :int , a__ :int , a__ :int ):
"""simple docstring"""
UpperCamelCase__ = t.shape[:no_batch_dims]
UpperCamelCase__ = list(_flat_idx_to_idx(a__ , a__ ) )
# _get_minimal_slice_set is inclusive
UpperCamelCase__ = list(_flat_idx_to_idx(flat_end - 1 , a__ ) )
# Get an ordered list of slices to perform
UpperCamelCase__ = _get_minimal_slice_set(
a__ , a__ , a__ , )
UpperCamelCase__ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _UpperCamelCase (a__ :Callable , a__ :Dict[str, Any] , a__ :int , a__ :int , a__ :bool = False , a__ :Any = None , a__ :bool = False , ):
"""simple docstring"""
if not (len(a__ ) > 0):
raise ValueError("""Must provide at least one input""" )
UpperCamelCase__ = [shape[:no_batch_dims] for shape in _fetch_dims(a__ )]
UpperCamelCase__ = tuple([max(a__ ) for s in zip(*a__ )] )
def _prep_inputs(a__ :torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCamelCase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCamelCase__ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCamelCase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCamelCase__ = tensor_tree_map(_prep_inputs , a__ )
UpperCamelCase__ = None
if _out is not None:
UpperCamelCase__ = tensor_tree_map(lambda a__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCamelCase__ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(a__ :torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ = 0
UpperCamelCase__ = prepped_outputs
for _ in range(a__ ):
# Chunk the input
if not low_mem:
UpperCamelCase__ = _select_chunk
else:
UpperCamelCase__ = partial(
_chunk_slice , flat_start=a__ , flat_end=min(a__ , i + chunk_size ) , no_batch_dims=len(a__ ) , )
UpperCamelCase__ = tensor_tree_map(a__ , a__ )
# Run the layer on the chunk
UpperCamelCase__ = layer(**a__ )
# Allocate space for the output
if out is None:
UpperCamelCase__ = tensor_tree_map(lambda a__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , a__ )
# Put the chunk in its pre-allocated space
if isinstance(a__ , a__ ):
def assign(a__ :dict , a__ :dict ) -> None:
for k, v in da.items():
if isinstance(a__ , a__ ):
assign(a__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ = da[k]
assign(a__ , a__ )
elif isinstance(a__ , a__ ):
for xa, xa in zip(a__ , a__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ = xa
elif isinstance(a__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
UpperCamelCase__ = tensor_tree_map(lambda a__ : t.view(orig_batch_dims + t.shape[1:] ) , a__ )
return out
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase = 512 , ):
UpperCamelCase__ = max_chunk_size
UpperCamelCase__ = None
UpperCamelCase__ = None
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCamelCase__ = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__lowerCAmelCase ) -> bool:
try:
with torch.no_grad():
fn(*__lowerCAmelCase , chunk_size=__lowerCAmelCase )
return True
except RuntimeError:
return False
UpperCamelCase__ = 0
UpperCamelCase__ = len(__lowerCAmelCase ) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ = test_chunk_size(candidates[i] )
if not viable:
UpperCamelCase__ = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ = i
UpperCamelCase__ = (i + len(__lowerCAmelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
for aa, aa in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert type(__lowerCAmelCase ) == type(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(__lowerCAmelCase , __lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = [v for _, v in sorted(aa.items() , key=lambda __lowerCAmelCase : x[0] )]
UpperCamelCase__ = [v for _, v in sorted(aa.items() , key=lambda __lowerCAmelCase : x[0] )]
consistent &= self._compare_arg_caches(__lowerCAmelCase , __lowerCAmelCase )
else:
consistent &= aa == aa
return consistent
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
UpperCamelCase__ = True
UpperCamelCase__ = tree_map(lambda __lowerCAmelCase : a.shape if isinstance(__lowerCAmelCase , torch.Tensor ) else a , __lowerCAmelCase , __lowerCAmelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__lowerCAmelCase )
UpperCamelCase__ = self._compare_arg_caches(self.cached_arg_data , __lowerCAmelCase )
else:
# Otherwise, we can reuse the precomputed value
UpperCamelCase__ = False
if not consistent:
UpperCamelCase__ = self._determine_favorable_chunk_size(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
UpperCamelCase__ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 353 |
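Stripped of the slice bookkeeping, the chunking utility above flattens the batch dimensions, runs the layer on fixed-size slices, and concatenates the results, trading peak memory for extra kernel launches. A minimal torch sketch of that core loop (names and shapes are mine):

import torch

def chunked_apply(fn, x, chunk_size):
    # Apply fn to chunk_size rows of x at a time and stitch the outputs back.
    outs = [fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(outs, dim=0)

x = torch.randn(10, 4)
out = chunked_apply(lambda t: t * 2, x, chunk_size=3)
assert torch.equal(out, x * 2)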
UpperCamelCase__ = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
UpperCamelCase__ = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def _UpperCamelCase (a__ :float , a__ :str , a__ :str ):
"""simple docstring"""
UpperCamelCase__ = from_type.lower().strip("""s""" )
UpperCamelCase__ = to_type.lower().strip("""s""" )
UpperCamelCase__ = UNIT_SYMBOL.get(a__ , a__ )
UpperCamelCase__ = UNIT_SYMBOL.get(a__ , a__ )
if from_sanitized not in METRIC_CONVERSION:
UpperCamelCase__ = (
f"""Invalid 'from_type' value: {from_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
if to_sanitized not in METRIC_CONVERSION:
UpperCamelCase__ = (
f"""Invalid 'to_type' value: {to_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
UpperCamelCase__ = METRIC_CONVERSION[from_sanitized]
UpperCamelCase__ = METRIC_CONVERSION[to_sanitized]
UpperCamelCase__ = 1
if from_exponent > to_exponent:
UpperCamelCase__ = from_exponent - to_exponent
else:
UpperCamelCase__ = -(to_exponent - from_exponent)
return value * pow(10 , a__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 87 | 0 |
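A worked example of the exponent arithmetic above, with the lookup table inlined: converting 4 kilometers to meters uses exponents 3 and 0, so the factor is 10 ** (3 - 0) and the result is 4000:

exponents = {"m": 0, "km": 3}
value, from_unit, to_unit = 4, "km", "m"
print(value * 10 ** (exponents[from_unit] - exponents[to_unit]))  # 4000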
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def lowercase ( a__ : int , a__ : int , a__ : int ) -> tuple[complex, complex]:
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''' )
_UpperCamelCase = b * b - 4 * a * c
_UpperCamelCase = (-b + sqrt(a__ )) / (2 * a)
_UpperCamelCase = (-b - sqrt(a__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def lowercase ( ) -> Union[str, Any]:
_UpperCamelCase , _UpperCamelCase = quadratic_roots(a=5 , b=6 , c=1 )
print(F'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
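For reference, main() above solves 5x^2 + 6x + 1 = 0, which factors as (5x + 1)(x + 1); the discriminant is 36 - 20 = 16, so the closed form gives roots (-6 ± 4) / 10:

print((-6 + 16 ** 0.5) / 10, (-6 - 16 ** 0.5) / 10)  # -0.2 -1.0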
| 256 | """simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase ( a__ : str , a__ : bool = True , a__ : float = math.inf , a__ : float = -math.inf , a__ : float = math.inf , a__ : float = -math.inf , a__ : bool = False , a__ : float = 100 , a__ : float = 0.01 , a__ : float = 1 , ) -> Any:
_UpperCamelCase = False
_UpperCamelCase = search_prob
_UpperCamelCase = start_temperate
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = None
while not search_end:
_UpperCamelCase = current_state.score()
if best_state is None or current_score > best_state.score():
_UpperCamelCase = current_state
scores.append(a__ )
iterations += 1
_UpperCamelCase = None
_UpperCamelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_UpperCamelCase = random.randint(0 , len(a__ ) - 1 ) # picking a random neighbor
_UpperCamelCase = neighbors.pop(a__ )
_UpperCamelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_UpperCamelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_UpperCamelCase = picked_neighbor
else:
_UpperCamelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_UpperCamelCase = picked_neighbor
_UpperCamelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_UpperCamelCase = True
else:
_UpperCamelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(a__ ) , a__ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def lowercase ( a__ : str , a__ : List[Any] ) -> Tuple:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def lowercase ( a__ : str , a__ : Optional[Any] ) -> Union[str, Any]:
return (3 * x**2) - (6 * y)
UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 256 | 1 |
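The heart of the loop above is the Metropolis acceptance rule: a worse neighbor (change < 0) is still accepted with probability e ** (change / temperature), so high temperatures accept almost anything and low temperatures almost nothing. A quick numeric illustration:

import math

change = -2.0  # the candidate neighbor is worse by 2
for temp in (100.0, 10.0, 1.0, 0.1):
    print(temp, math.e ** (change / temp))
# 100.0 -> 0.980..., 10.0 -> 0.818..., 1.0 -> 0.135..., 0.1 -> ~2e-09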
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class __magic_name__ :
def __init__( self , snake_case , snake_case) -> None:
'''simple docstring'''
if len(snake_case) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.')
_UpperCAmelCase : list[float] =list(snake_case)
_UpperCAmelCase : int =degree
def __add__( self , snake_case) -> Polynomial:
'''simple docstring'''
if self.degree > polynomial_a.degree:
_UpperCAmelCase : Tuple =self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , snake_case)
else:
_UpperCAmelCase : Optional[Any] =polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , snake_case)
def __sub__( self , snake_case) -> Polynomial:
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1])
def __neg__( self) -> Polynomial:
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients])
def __mul__( self , snake_case) -> Polynomial:
'''simple docstring'''
_UpperCAmelCase : list[float] =[0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , snake_case)
def lowerCAmelCase ( self , snake_case) -> int | float:
'''simple docstring'''
_UpperCAmelCase : int | float =0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple =''
for i in range(self.degree , -1 , -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(snake_case)
return polynomial
def __repr__( self) -> str:
'''simple docstring'''
return self.__str__()
def lowerCAmelCase ( self) -> Polynomial:
'''simple docstring'''
_UpperCAmelCase : list[float] =[0] * self.degree
for i in range(self.degree):
_UpperCAmelCase : List[Any] =self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , snake_case)
def lowerCAmelCase ( self , snake_case = 0) -> Polynomial:
'''simple docstring'''
_UpperCAmelCase : list[float] =[0] * (self.degree + 2)
_UpperCAmelCase : str =constant
for i in range(self.degree + 1):
_UpperCAmelCase : str =self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , snake_case)
def __eq__( self , snake_case) -> bool:
'''simple docstring'''
if not isinstance(snake_case , snake_case):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self , snake_case) -> bool:
'''simple docstring'''
return not self.__eq__(snake_case)
| 242 |
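A short usage sketch of the class above. Coefficients are given in ascending order of power; the evaluation and derivative methods are the ones whose placeholder names collide as written, so the method names below are the intended ones rather than the literal ones in the snippet:

p = Polynomial(1, [1, 2])     # 2x + 1
q = Polynomial(2, [0, 0, 1])  # x^2
print(p + q)                  # 1x^2 + 2x + 1
print(p.evaluate(3))          # 2*3 + 1 = 7
print(p.derivative())         # 2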
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase ={
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =[
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =[
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowercase =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 242 | 1 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A__ ( pl.LightningModule ):
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Any ) -> int:
'''simple docstring'''
super().__init__()
_a : Optional[Any] =model
_a : Any =2
_a : Union[str, Any] =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : str ) -> int:
# load longformer model from model identifier
_a : Any =LongformerModel.from_pretrained(_UpperCAmelCase )
_a : Dict =LightningModel(_UpperCAmelCase )
_a : Tuple =torch.load(_UpperCAmelCase ,map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
_a : List[Any] =LongformerForQuestionAnswering.from_pretrained(_UpperCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCAmelCase )
print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
A__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: int = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 276 |
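The transfer step above works because load_state_dict copies parameters between modules whose state-dict keys match, which is why the script moves the encoder and the qa_outputs head separately. A tiny standalone illustration with two identically shaped linear layers (shapes invented):

import torch
from torch import nn

src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
dst.load_state_dict(src.state_dict())  # dst now holds src's exact weights
x = torch.randn(1, 4)
assert torch.equal(src(x), dst(x))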
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
@dataclass
class A__ :
__UpperCamelCase : str
__UpperCamelCase : List[str]
__UpperCamelCase : Optional[List[str]]
@dataclass
class A__ :
__UpperCamelCase : List[int]
__UpperCamelCase : List[int]
__UpperCamelCase : Optional[List[int]] = None
__UpperCamelCase : Optional[List[int]] = None
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = "train"
__UpperCamelCase : Tuple = "dev"
__UpperCamelCase : str = "test"
class A__ :
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[Split, str] ) -> List[InputExample]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :List[InputExample] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE :Optional[int]=1 , SCREAMING_SNAKE_CASE :Any="[SEP]" , SCREAMING_SNAKE_CASE :List[Any]=False , SCREAMING_SNAKE_CASE :Union[str, Any]=False , SCREAMING_SNAKE_CASE :List[str]=0 , SCREAMING_SNAKE_CASE :str=0 , SCREAMING_SNAKE_CASE :Dict=-1_0_0 , SCREAMING_SNAKE_CASE :Optional[int]=0 , SCREAMING_SNAKE_CASE :Tuple=True , ) -> List[InputFeatures]:
'''simple docstring'''
_a : str ={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )}
_a : Tuple =[]
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
_a : Optional[Any] =[]
_a : List[Any] =[]
for word, label in zip(example.words , example.labels ):
_a : Optional[int] =tokenizer.tokenize(SCREAMING_SNAKE_CASE )
# bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing just a space.
if len(SCREAMING_SNAKE_CASE ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a : Optional[int] =tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE ) > max_seq_length - special_tokens_count:
_a : List[Any] =tokens[: (max_seq_length - special_tokens_count)]
_a : Tuple =label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_a : Dict =[sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_a : Any =[cls_token] + tokens
_a : Dict =[pad_token_label_id] + label_ids
_a : Union[str, Any] =[cls_token_segment_id] + segment_ids
_a : List[str] =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_a : Optional[int] =[1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE )
# Zero-pad up to the sequence length.
_a : Union[str, Any] =max_seq_length - len(SCREAMING_SNAKE_CASE )
if pad_on_left:
_a : Optional[Any] =([pad_token] * padding_length) + input_ids
_a : Optional[int] =([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_a : Union[str, Any] =([pad_token_segment_id] * padding_length) + segment_ids
_a : Dict =([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_a : Tuple =None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , label_ids=SCREAMING_SNAKE_CASE ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self :Dict , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :int=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> List[str]:
'''simple docstring'''
# Load data features from cache or dataset file
_a : Optional[Any] =os.path.join(
SCREAMING_SNAKE_CASE , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a : List[str] =cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE ):
if os.path.exists(SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
_a : Any =torch.load(SCREAMING_SNAKE_CASE )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
_a : Any =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[str] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"Saving features into cached file {cached_features_file}" )
torch.save(self.features , SCREAMING_SNAKE_CASE )
def __len__( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :Dict , SCREAMING_SNAKE_CASE :int ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = -100
def __init__( self :str , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> Any:
'''simple docstring'''
_a : Tuple =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[Any] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
_a : List[Any] =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self :str ) -> Optional[int]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :int , SCREAMING_SNAKE_CASE :str ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
| 276 | 1 |
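The central trick in convert_examples_to_features above is that only the first sub-token of each word keeps the real label; the remaining pieces get pad_token_label_id (-100) so the cross-entropy loss ignores them. A standalone illustration of that alignment, with the tokenizer faked for clarity:

pad_token_label_id = -100
words, labels = ["New", "York"], ["B-LOC", "I-LOC"]
label_map = {"B-LOC": 0, "I-LOC": 1}
fake_subtokens = {"New": ["New"], "York": ["Yo", "##rk"]}

tokens, label_ids = [], []
for word, label in zip(words, labels):
    pieces = fake_subtokens[word]
    tokens.extend(pieces)
    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))

print(tokens)     # ['New', 'Yo', '##rk']
print(label_ids)  # [0, 1, -100]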
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_a : Optional[int] = logging.getLogger(__name__)
def _lowerCAmelCase ( ) -> Optional[Any]:
__lowerCAmelCase = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=lowercase , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=lowercase , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=lowercase , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=lowercase , default=1000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=lowercase , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=lowercase , type=lowercase , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=lowercase , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=lowercase , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
__lowerCAmelCase = parser.parse_args()
return args
def _lowerCAmelCase ( lowercase ) -> List[Any]:
def fn(lowercase ):
return tokenizer(examples["""text"""] )
return fn
def _lowerCAmelCase ( lowercase ) -> List[str]:
__lowerCAmelCase = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
__lowerCAmelCase = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
__lowerCAmelCase = tf.train.Features(feature=lowercase )
__lowerCAmelCase = tf.train.Example(features=lowercase )
__lowerCAmelCase = example.SerializeToString()
records.append(lowercase )
return records
def _lowerCAmelCase ( lowercase ) -> Tuple:
__lowerCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__lowerCAmelCase = min(len(lowercase ) , args.limit )
__lowerCAmelCase = dataset.select(range(lowercase ) )
print(f'Limiting the dataset to {args.limit} entries.' )
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__lowerCAmelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
__lowerCAmelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__lowerCAmelCase = tokenize_function(lowercase )
__lowerCAmelCase = dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase ):
# Concatenate all texts.
__lowerCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
__lowerCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__lowerCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__lowerCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__lowerCAmelCase = dataset_tokenized.map(lowercase , batched=lowercase , batch_size=1000 , num_proc=4 )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for shard in range(0 , len(lowercase ) , args.shard_size ):
__lowerCAmelCase = grouped_dataset[shard : shard + args.shard_size]
__lowerCAmelCase = len(dataset_snapshot["""input_ids"""] )
__lowerCAmelCase = os.path.join(lowercase , f'dataset-{shard_count}-{records_containing}.tfrecord' )
__lowerCAmelCase = get_serialized_examples(lowercase )
with tf.io.TFRecordWriter(lowercase ) as out_file:
for i in range(len(lowercase ) ):
__lowerCAmelCase = serialized_examples[i]
out_file.write(lowercase )
print("""Wrote file {} containing {} records""".format(lowercase , lowercase ) )
shard_count += 1
total_records += records_containing
with open(f'split-{args.split}-records-count.txt' , """w""" ) as f:
print(f'Total {args.split} records: {total_records}' , file=lowercase )
if __name__ == "__main__":
_a : Any = parse_args()
main(args)
| 46 |
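To sanity-check a shard written by the script above, the records can be read back with a feature spec that mirrors the serialization. A minimal sketch, assuming TensorFlow is installed; the shard filename is illustrative:

import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}
dataset = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord")
for raw in dataset.take(1):
    example = tf.io.parse_single_example(raw, feature_spec)
    print(tf.sparse.to_dense(example["input_ids"]))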
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _lowerCAmelCase ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , UserWarning , )
return fn(*lowercase , **lowercase )
return _inner_fn
| 46 | 1 |
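Usage of the decorator above is ordinary decoration: every call first emits the warning (subject to the active warning filters), then delegates to the wrapped function. The function below is invented for illustration:

@_lowerCAmelCase  # i.e. the experimental decorator defined above
def new_api(x):
    return x * 2

print(new_api(3))  # warns, then prints 6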
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 240 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 240 | 1 |
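A hypothetical invocation of the conversion script above, with placeholder paths; the script filename is a placeholder, but the flags are exactly the ones declared by the argument parser:

python convert_gpt2_checkpoint.py \
    --gpt2_checkpoint_path /path/to/tf_checkpoint \
    --pytorch_dump_folder_path /path/to/output_dir \
    --gpt2_config_file /path/to/config.json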
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCAmelCase ( Dataset ):
    '''simple docstring'''

    def __init__(self, params, data) -> None:
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self) -> None:
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self) -> None:
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self) -> None:
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self) -> None:
        if "unk_token" not in self.params.special_tok_ids:
            return
        unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self) -> None:
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
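A minimal sketch of how a dataset like the one above is typically consumed, using its batch_sequences method as a DataLoader collate function; params and data here are assumed stand-ins for the real distillation arguments and token arrays:

from torch.utils.data import DataLoader

dataset = UpperCAmelCase(params, data)  # the class defined above
loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
for token_ids, lengths in loader:
    # token_ids: (bs, max_seq_len_) padded LongTensor, lengths: (bs,)
    pass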
| 214 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 214 | 1 |
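The shape of the two assertions in the slow sequence-builders test above, spelled out with toy ids (102 is the separator id the tokenizer appends):

text = [10, 11, 12]
text_2 = [20, 21]
assert text + [102] == [10, 11, 12, 102]                                 # single sequence
assert text + [102] + text_2 + [102] == [10, 11, 12, 102, 20, 21, 102]   # sequence pair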
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 50 |
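A quick sanity check of the sliding-window logic above: for [1, 4, 2, 10, 2] with k=3 the window sums are 7, 16 and 14, so the maximum is 16:

print(max_sum_in_array([1, 4, 2, 10, 2], 3))  # 16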
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.0_0_2,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained('google/umt5-base')
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fpaa_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest ( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
# The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                F"""{tmpdirname}/t5_test.onnx""",
                export_params=True,
                opset_version=9,
                input_names=['input_ids', 'decoder_input_ids'],
            )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)
    def test_model_with_head_masking(self):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]['input_ids'],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 50 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 361 |
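For reference, the replacement import that the warning above recommends is simply:

from diffusers import StableDiffusionImg2ImgPipeline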
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Dict ):
'''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 298 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
_snake_case : Union[str, Any] = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 123 |
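A hypothetical invocation of the converter above, reusing the defaults suggested by the parser's help strings (the script filename is a placeholder):

python convert_blenderbot_checkpoint.py \
    --src_path blenderbot-model.bin \
    --save_dir hf_blenderbot \
    --hf_config_json blenderbot-3b-config.json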
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """simple docstring"""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 123 | 1 |
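A small worked example of the two heuristics above for the start node (0, 0) with the goal at (6, 6): Manhattan gives |0-6| + |0-6| = 12, while Euclidean gives sqrt(6**2 + 6**2) = sqrt(72) ≈ 8.49. With HEURISTIC = 0 the Euclidean value is used:

node = Node(0, 0, 6, 6, 0, None)
print(node.h_cost)  # ≈ 8.485 for HEURISTIC = 0, 12 for HEURISTIC = 1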
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
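From the consumer's side, the lazy structure above means the heavy torch-dependent submodules are only imported on first attribute access; a usage sketch:

# importing the package is cheap; this does not pull in torch yet
from transformers.models import owlvit
# the first attribute access triggers the real import via _LazyModule
processor_cls = owlvit.OwlViTProcessor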
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 144 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( BaseImageProcessor ):
a__ : Union[str, Any] = ["""pixel_values"""]
    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 148 | 1 |
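A usage sketch for the processor class above (referred to by the name it is given in this file), assuming it behaves like the upstream CLIP image processor it mirrors:

import numpy as np
from PIL import Image

image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
processor = lowerCamelCase__()  # defaults: resize shortest edge to 224, center crop to 224x224
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)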
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput( BaseOutput ):
    sample: torch.FloatTensor
class UNetaDModel( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,SCREAMING_SNAKE_CASE__ = 6_55_36 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 2 ,SCREAMING_SNAKE_CASE__ = 2 ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = "fourier" ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,SCREAMING_SNAKE_CASE__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,SCREAMING_SNAKE_CASE__ = "UNetMidBlock1D" ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = (32, 32, 64) ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 8 ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = False ,) -> Dict:
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE :Tuple = sample_size
# time
if time_embedding_type == "fourier":
__SCREAMING_SNAKE_CASE :Optional[Any] = GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=SCREAMING_SNAKE_CASE__ ,log=SCREAMING_SNAKE_CASE__ ,flip_sin_to_cos=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__SCREAMING_SNAKE_CASE :List[str] = Timesteps(
block_out_channels[0] ,flip_sin_to_cos=SCREAMING_SNAKE_CASE__ ,downscale_freq_shift=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = block_out_channels[0]
if use_timestep_embedding:
__SCREAMING_SNAKE_CASE :List[Any] = block_out_channels[0] * 4
__SCREAMING_SNAKE_CASE :Union[str, Any] = TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE__ ,time_embed_dim=SCREAMING_SNAKE_CASE__ ,act_fn=SCREAMING_SNAKE_CASE__ ,out_dim=block_out_channels[0] ,)
__SCREAMING_SNAKE_CASE :List[str] = nn.ModuleList([] )
__SCREAMING_SNAKE_CASE :List[Any] = None
__SCREAMING_SNAKE_CASE :Tuple = nn.ModuleList([] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = None
# down
__SCREAMING_SNAKE_CASE :Union[str, Any] = in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Dict = output_channel
__SCREAMING_SNAKE_CASE :Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__SCREAMING_SNAKE_CASE :Optional[int] = i == len(SCREAMING_SNAKE_CASE__ ) - 1
__SCREAMING_SNAKE_CASE :Dict = get_down_block(
SCREAMING_SNAKE_CASE__ ,num_layers=SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(SCREAMING_SNAKE_CASE__ )
# mid
__SCREAMING_SNAKE_CASE :List[Any] = get_mid_block(
SCREAMING_SNAKE_CASE__ ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=SCREAMING_SNAKE_CASE__ ,add_downsample=SCREAMING_SNAKE_CASE__ ,)
# up
__SCREAMING_SNAKE_CASE :List[str] = list(reversed(SCREAMING_SNAKE_CASE__ ) )
__SCREAMING_SNAKE_CASE :Tuple = reversed_block_out_channels[0]
if out_block_type is None:
__SCREAMING_SNAKE_CASE :List[Any] = out_channels
else:
__SCREAMING_SNAKE_CASE :Any = block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :List[str] = output_channel
__SCREAMING_SNAKE_CASE :Dict = (
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE__ ) - 1 else final_upsample_channels
)
__SCREAMING_SNAKE_CASE :Any = i == len(SCREAMING_SNAKE_CASE__ ) - 1
__SCREAMING_SNAKE_CASE :Any = get_up_block(
SCREAMING_SNAKE_CASE__ ,num_layers=SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = output_channel
# out
__SCREAMING_SNAKE_CASE :int = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
__SCREAMING_SNAKE_CASE :Dict = get_out_block(
out_block_type=SCREAMING_SNAKE_CASE__ ,num_groups_out=SCREAMING_SNAKE_CASE__ ,embed_dim=block_out_channels[0] ,out_channels=SCREAMING_SNAKE_CASE__ ,act_fn=SCREAMING_SNAKE_CASE__ ,fc_dim=block_out_channels[-1] // 4 ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = True ,) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = timestep
if not torch.is_tensor(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Any = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ) and len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE :int = timesteps[None].to(sample.device )
__SCREAMING_SNAKE_CASE :str = self.time_proj(SCREAMING_SNAKE_CASE__ )
if self.config.use_timestep_embedding:
__SCREAMING_SNAKE_CASE :List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :int = timestep_embed[..., None]
__SCREAMING_SNAKE_CASE :Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__SCREAMING_SNAKE_CASE :List[str] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__SCREAMING_SNAKE_CASE :Dict = ()
for downsample_block in self.down_blocks:
__SCREAMING_SNAKE_CASE :Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE__ ,temb=SCREAMING_SNAKE_CASE__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__SCREAMING_SNAKE_CASE :Tuple = self.mid_block(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__SCREAMING_SNAKE_CASE :Optional[int] = down_block_res_samples[-1:]
__SCREAMING_SNAKE_CASE :Any = down_block_res_samples[:-1]
__SCREAMING_SNAKE_CASE :str = upsample_block(SCREAMING_SNAKE_CASE__ ,res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ ,temb=SCREAMING_SNAKE_CASE__ )
# 5. post-process
if self.out_block:
__SCREAMING_SNAKE_CASE :str = self.out_block(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=SCREAMING_SNAKE_CASE__ ) | 365 |
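A short usage sketch for the 1D UNet defined above, assuming a diffusers installation that exports it as UNet1DModel; the sample size and the printed shape are illustrative assumptions, not verified output:

import torch
from diffusers import UNet1DModel

model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
noisy_sample = torch.randn(1, 2, 256)  # (batch, channels, length)
with torch.no_grad():
    denoised = model(noisy_sample, timestep=torch.tensor([10])).sample
print(denoised.shape)  # expected to match the input: torch.Size([1, 2, 256])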
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative escape step of the point (x, y) under the Mandelbrot iteration."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set, white for the rest."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the set, an HSV hue based on the distance otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show() | 239 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True if every tensor in the list has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5,
            resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="quick_gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_equivalence(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20,
            guidance_scale=0, generator=generator, output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20,
            guidance_scale=0, generator=generator, output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 131 |
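The tests above exercise a two-stage flow: a base pipeline produces latents, and the x2 latent upscaler refines them. A hedged end-to-end sketch using the same model ids as the tests (requires a CUDA GPU and the listed checkpoints):

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut riding a horse"
low_res_latents = pipe(prompt, output_type="latent").images
image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0).images[0]
image.save("astronaut_1024.png")  # 2x the base pipeline's 512x512 output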
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 131 | 1 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we do not include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 27 |
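The memoized recursion above runs in O(N * 2^M) time for N tasks and M persons. For small inputs it can be cross-checked against brute force over all task choices; on the sample input both approaches give 10 (a verification sketch, not part of the original module):

from itertools import product


def count_ways_brute_force(task_performed, total_tasks):
    # try every assignment of one task to each person; count the distinct, feasible ones
    count = 0
    for choice in product(range(1, total_tasks + 1), repeat=len(task_performed)):
        distinct = len(set(choice)) == len(choice)
        feasible = all(task in doable for task, doable in zip(choice, task_performed))
        if distinct and feasible:
            count += 1
    return count


assert count_ways_brute_force([[1, 3, 4], [1, 2, 5], [3, 4]], 5) == 10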
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize the target text to match the normalization applied during training."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
| 27 | 1 |
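A minimal sketch of the metric call used in log_results above (this uses datasets.load_metric as the script does; newer code would typically use the separate evaluate library instead):

from datasets import load_metric

wer = load_metric("wer")
print(wer.compute(references=["hello world"], predictions=["hello word"]))  # 0.5 (1 of 2 words wrong)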
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Return the difference between the predicted and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta_0 + sum_i theta_i * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 12 |
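The per-parameter update in run_gradient_descent is the standard least-squares rule theta <- theta - lr * X^T (X theta - y) / m. An equivalent vectorized sketch on the same data (illustrative, not part of the original module; the bias enters as a leading column of ones):

import numpy as np

X = np.array([[1, 5, 2, 3], [1, 6, 5, 9], [1, 11, 12, 13], [1, 1, 1, 1], [1, 11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
theta = np.array([2, 4, 1, 5], dtype=float)

for _ in range(100_000):
    new_theta = theta - 0.009 * (X.T @ (X @ theta - y)) / len(y)
    if np.allclose(theta, new_theta, atol=2e-6, rtol=0):
        break
    theta = new_theta
print(theta)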
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image,
            generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image,
            generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image,
            generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 12 | 1 |
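A hedged usage sketch matching what the integration tests above verify, with the same checkpoint and image URLs (requires a CUDA GPU):

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save("inpainted.png")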
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but suppressed when the env var TRANSFORMERS_NO_ADVISORY_WARNINGS is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but emits a warning with the same message only once.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 357 |
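The module above backs the public transformers.logging helpers. A short usage sketch (assuming a standard transformers installation):

import transformers

transformers.logging.set_verbosity_info()
logger = transformers.logging.get_logger("transformers")
logger.info("visible at INFO verbosity")
logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")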
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1] | 291 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # a color is valid for a vertex if no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 119 |
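Example usage of the backtracking colorer above on a 4-cycle given as an adjacency matrix; two colors suffice, and the first valid assignment found alternates them:

four_cycle = [
    [0, 1, 0, 1],
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [1, 0, 1, 0],
]
print(color(four_cycle, 2))  # [0, 1, 0, 1]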
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 119 | 1 |
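A short sketch of constructing the config above (assumes a transformers installation exporting Speech2Text2Config):

from transformers import Speech2Text2Config

config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)
print(config.hidden_size)  # 256: attribute_map aliases hidden_size to d_model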
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 353 |
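The dummy-object pattern above in miniature: a metaclass that turns attribute access on a placeholder class into a clear error about missing optional backends (a simplified sketch; the real DummyObject lives in the library's utils module):

class DummyObjectSketch(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends} to be installed")


class NeedsNoteSeq(metaclass=DummyObjectSketch):
    _backends = ["transformers", "torch", "note_seq"]


# NeedsNoteSeq.from_pretrained -> ImportError naming the missing backends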
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
) | 270 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 111 | 1 |
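HRRN's selection rule in one expression: among the ready processes, pick the one maximizing (waiting_time + burst_time) / burst_time, which the loop above computes as `temp`:

ready = {"B": (3, 2), "C": (1, 6)}  # name -> (waiting_time, burst_time)
best = max(ready, key=lambda p: (ready[p][0] + ready[p][1]) / ready[p][1])
print(best)  # 'B' (ratio 2.5 beats C's ~1.17)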
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 58 |
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """A partition is 'perfect' if sqrt(4k + 1) / 2 + 1 / 2 is an exact power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }") | 58 | 1 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
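    # Editorial usage sketch (not part of the original script); the script filename
    # and paths are placeholders, and only flags defined above are used:
    #
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5-pruned.ckpt \
    #       --original_config_file ./v1-inference.yaml \
    #       --scheduler_type ddim \
    #       --dump_path ./converted-pipeline \
    #       --half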
| 161 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze-and-Excitation layer: pools to 1x1, computes per-channel attention, and rescales the input.
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
a__ : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
a__ : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
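# Editorial usage sketch (not part of the original file); the checkpoint name is taken
# from the docstring constants defined above:
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet head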
| 161 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
_a : int = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
_a : Optional[int] = {F"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
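# Editorial usage sketch (not part of the original file):
#
#   tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   encoding = tokenizer("Hello world")
#   # The leading <cls> token gets token_type_id 2 (cls_token_type_id above);
#   # the rest of a single sequence gets 0.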
| 366 | """simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
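# Editorial examples (not part of the original module):
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"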
| 126 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
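# Editorial note (not part of the original file): _LazyModule defers the heavy torch
# import until an attribute is first touched. Sketch of the effect, assuming this file
# lives at transformers/models/altclip/__init__.py:
#
#   import transformers.models.altclip as altclip  # cheap: torch is not loaded yet
#   model_cls = altclip.AltCLIPModel               # first access triggers the real import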
| 57 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | None = None

        self.N: int = len(arr)
        # Array-backed segment tree of size 2 * N; leaves live in st[N:].
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """
        Test all possible segments
        """
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
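    # Editorial example (not part of the original module): direct queries read
    # naturally; build is O(n), update and query are O(log n).
    demo = SegmentTree([1, 10, -2, 9], min)
    assert demo.query(0, 2) == -2  # min of [1, 10, -2]
    demo.update(2, 100)
    assert demo.query(0, 2) == 1  # min of [1, 10, 100]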
| 22 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""bert-base-cased"""
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase ="""bert-base-cased"""
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForPreTraining.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Tuple:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForCausalLM.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForCausalLM.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModelWithLMHead.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForMaskedLM.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForMaskedLM.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
            _lowerCAmelCase =TFAutoModelForSeq2SeqLM.from_pretrained(__UpperCAmelCase )
            _lowerCAmelCase =TFAutoModelForSeq2SeqLM.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Dict:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForSequenceClassification.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForQuestionAnswering.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
@require_tensorflow_probability
def _lowerCAmelCase ( self ) -> int:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForTableQuestionAnswering.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =TFAutoModelForTableQuestionAnswering.from_pretrained(
__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
    def test_from_pretrained_identifier(self) -> str:
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
    def test_from_identifier_from_model_type(self) -> Dict:
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =copy.deepcopy(model.config )
_lowerCAmelCase =["""FunnelBaseModel"""]
_lowerCAmelCase =TFAutoModel.from_config(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase )
_lowerCAmelCase =TFAutoModel.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
try:
AutoConfig.register("""new-model""" , __UpperCAmelCase )
_lowerCAmelCase =[
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__UpperCAmelCase ):
auto_class.register(__UpperCAmelCase , __UpperCAmelCase )
auto_class.register(__UpperCAmelCase , __UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
auto_class.register(__UpperCAmelCase , __UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase =BertModelTester(self ).get_config()
_lowerCAmelCase =NewModelConfig(**tiny_config.to_dict() )
_lowerCAmelCase =auto_class.from_config(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase )
_lowerCAmelCase =auto_class.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__UpperCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
_lowerCAmelCase =TFAutoModel.from_pretrained("""bert-base""" )
def _lowerCAmelCase ( self ) -> Dict:
with self.assertRaisesRegex(
__UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_lowerCAmelCase =TFAutoModel.from_pretrained(__UpperCAmelCase , revision="""aaaaaa""" )
def _lowerCAmelCase ( self ) -> List[str]:
with self.assertRaisesRegex(
__UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
_lowerCAmelCase =TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def _lowerCAmelCase ( self ) -> List[Any]:
with self.assertRaisesRegex(__UpperCAmelCase , """Use `from_pt=True` to load this model""" ):
_lowerCAmelCase =TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_lowerCAmelCase =TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_lowerCAmelCase =TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
_lowerCAmelCase =TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
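# Editorial sketch (not part of the original test file): the registration test above
# doubles as a recipe for plugging a custom architecture into the auto classes,
# mirroring the calls it exercises:
#
#   AutoConfig.register("new-model", NewModelConfig)
#   TFAutoModel.register(NewModelConfig, TFNewModel)
#   model = TFAutoModel.from_config(NewModelConfig())  # resolved through the registry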
| 366 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
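# Editorial example (not part of the original module): 585 = 0b1001001001 is
# palindromic in base 10 and base 2.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])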
| 341 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> Optional[int]:
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 322 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ['dtype']
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}'''
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
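# Editorial smoke test (not part of the original module): builds a CommonSchedulerState
# directly instead of via CommonSchedulerState.create, which needs a full scheduler.
#
#   betas = jnp.linspace(1e-4, 0.02, 10)
#   alphas = 1.0 - betas
#   state = CommonSchedulerState(
#       alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0)
#   )
#   sample = jnp.ones((2, 4))
#   timesteps = jnp.array([0, 9])
#   noisy = add_noise_common(state, sample, jnp.zeros_like(sample), timesteps)  # (2, 4)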
| 322 | 1 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                F"""has to be one of: { ', '.join(log_levels.keys()) }"""
            )
    return _default_log_level


def _get_library_name():
    return __name__.split(".")[0]


def _get_library_root_logger():
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger():
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger():
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None):
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity():
    """Return the current verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int):
    """Set the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation():
    """Disable propagation of the library log outputs (disabled by default)."""
    _get_library_root_logger().propagate = False


def enable_propagation():
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__(self):
        """simple docstring"""
        return iter(self._iterator)
    def __getattr__(self, _):
        """simple docstring"""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        """simple docstring"""
        return self
    def __exit__(self, type_, value, traceback):
        """simple docstring"""
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
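The EmptyTqdm class above is a null object: it accepts anything, iterates transparently, and turns every attribute access into a do-nothing function, so caller code never has to branch on whether progress bars are enabled. A self-contained sketch of the same pattern (names here are illustrative, not the library's):

class _NullBar:
    def __init__(self, iterable=None, **kwargs):
        self._iterable = iterable

    def __iter__(self):
        return iter(self._iterable)

    def __getattr__(self, name):
        # Any method call (set_postfix, update, close, ...) becomes a no-op.
        return lambda *args, **kwargs: None

bar = _NullBar(range(3), desc="ignored")
for step in bar:
    bar.set_postfix(step=step)  # silently a no-op
bar.close()                     # also a no-op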
| 102 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
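A hypothetical programmatic use of the same entry point the CLI above wraps; the paths are placeholders, and only keyword arguments that the argument parser above actually forwards are shown:

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_controlnet_from_original_ckpt,
)

controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="./control_sd15_canny.pth",   # placeholder path
    original_config_file="./cldm_v15.yaml",       # placeholder path
    image_size=512,
    from_safetensors=False,
)
controlnet.save_pretrained("./controlnet-canny")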
| 102 | 1 |
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class __lowerCAmelCase ( unittest.TestCase ):
    def test_primes(self) -> None:
"""simple docstring"""
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
    def test_not_primes(self) -> None:
        """simple docstring"""
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
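A quick sanity check of the 6k +/- 1 trial division above, assuming is_prime from this snippet is in scope:

# 97 = 6 * 16 + 1 is prime; 91 = 7 * 13 slips past the 2- and 3-checks
# but is caught in the loop at i = 5 (i + 2 == 7).
assert is_prime(97) is True
assert is_prime(91) is False
assert not is_prime(1)  # one has a single positive factor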
| 339 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 339 | 1 |
def excel_title_to_column(column_title: str) -> int:
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 363 |
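An equivalent left-to-right Horner formulation of the base-26 conversion above, shown as a sketch (not part of the original module), which avoids the explicit pow call:

def title_to_column_horner(title: str) -> int:
    result = 0
    for ch in title:
        result = result * 26 + (ord(ch) - 64)  # 'A' -> 1 ... 'Z' -> 26
    return result

assert title_to_column_horner("A") == 1
assert title_to_column_horner("AB") == 28   # 1 * 26 + 2
assert title_to_column_horner("ZY") == 701  # 26 * 26 + 25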
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
def make_batched(videos):
    '''simple docstring'''
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''')
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = ['''pixel_values''']
def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 1 / 255 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
UpperCAmelCase_ : str = do_resize
UpperCAmelCase_ : Union[str, Any] = size
UpperCAmelCase_ : int = do_center_crop
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Optional[int] = resample
UpperCAmelCase_ : List[Any] = do_rescale
UpperCAmelCase_ : Tuple = rescale_factor
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" in size:
UpperCAmelCase_ : Dict = get_resize_output_image_size(_SCREAMING_SNAKE_CASE ,size['''shortest_edge'''] ,default_to_square=_SCREAMING_SNAKE_CASE )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Tuple = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : str = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Dict:
return rescale(_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Any = to_numpy_array(_SCREAMING_SNAKE_CASE )
if do_resize:
UpperCAmelCase_ : Union[str, Any] = self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE )
if do_center_crop:
UpperCAmelCase_ : Optional[int] = self.center_crop(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE )
if do_rescale:
UpperCAmelCase_ : str = self.rescale(image=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE )
if do_normalize:
UpperCAmelCase_ : List[Any] = self.normalize(image=_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return image
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image:
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : int = resample if resample is not None else self.resample
UpperCAmelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase_ : List[Any] = make_batched(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = [
[
self._preprocess_image(
image=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,do_center_crop=_SCREAMING_SNAKE_CASE ,crop_size=_SCREAMING_SNAKE_CASE ,do_rescale=_SCREAMING_SNAKE_CASE ,rescale_factor=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,image_mean=_SCREAMING_SNAKE_CASE ,image_std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : Any = {'''pixel_values''': videos}
        return BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE )
| 363 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f'Iterative search: {target} found at positions: {result1}')
        print(f'Recursive search: {target} found at positions: {result2}')
    else:
        print('''Not found''')
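A short illustration of the searches above, assuming ite_ternary_search and rec_ternary_search from this snippet are in scope: each iteration probes the 1/3 and 2/3 points, discards at least a third of the range, and falls back to the linear scan once the window shrinks below `precision`:

sample = list(range(0, 100, 2))  # sorted, 50 elements
assert ite_ternary_search(sample, 18) == 9
assert ite_ternary_search(sample, 19) == -1
assert rec_ternary_search(0, len(sample) - 1, sample, 18) == 9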
| 338 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowercase__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase = self.framework
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model_inputs['''input_ids''']
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase = target_ids.shape[0]
lowerCAmelCase = model_outputs['''input_ids'''][0]
lowerCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase = outputs.numpy()
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase = probs[..., target_ids]
lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
lowerCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase = target_ids[p].tolist()
lowerCAmelCase = p
# Filter padding out:
lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
lowerCAmelCase = {}
if targets is not None:
lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = target_ids
if top_k is not None:
lowerCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
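Stripped of framework plumbing, the postprocess step above is: take the logits at the masked position, softmax, top-k. A minimal NumPy sketch of that core (shapes and token ids are invented for illustration):

import numpy as np

rng = np.random.default_rng(0)
logits = rng.normal(size=(1, 7, 100))  # (batch, seq_len, vocab_size)
masked_index = 3                       # assumed position of the [MASK] token
scores = logits[0, masked_index]
probs = np.exp(scores - scores.max())  # numerically stable softmax
probs /= probs.sum()
top5 = np.argsort(probs)[::-1][:5]
for token_id in top5:
    print(int(token_id), float(probs[token_id]))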
| 338 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , a , a):
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = 4
lowerCamelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowerCamelCase__ = False
lowerCamelCase__ = (320, 640, 1280, 1280)
lowerCamelCase__ = 2
lowerCamelCase__ = 8
lowerCamelCase__ = None
lowerCamelCase__ = 1280
lowerCamelCase__ = 0.0
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = False
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
_lowerCAmelCase : str = jnp.zeros(__a, dtype=jnp.floataa)
_lowerCAmelCase : Optional[int] = jnp.ones((1,), dtype=jnp.intaa)
_lowerCAmelCase : int = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
_lowerCAmelCase , _lowerCAmelCase : int = jax.random.split(__a)
_lowerCAmelCase : Tuple = {"params": params_rng, "dropout": dropout_rng}
return self.init(__a, __a, __a, __a)["params"]
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.block_out_channels
_lowerCAmelCase : Optional[Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowerCAmelCase : Dict = self.num_attention_heads or self.attention_head_dim
# input
_lowerCAmelCase : int = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_lowerCAmelCase : Optional[int] = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
_lowerCAmelCase : Dict = FlaxTimestepEmbedding(__a, dtype=self.dtype)
_lowerCAmelCase : List[Any] = self.only_cross_attention
if isinstance(__a, __a):
_lowerCAmelCase : List[Any] = (only_cross_attention,) * len(self.down_block_types)
if isinstance(__a, __a):
_lowerCAmelCase : Any = (num_attention_heads,) * len(self.down_block_types)
# down
_lowerCAmelCase : str = []
_lowerCAmelCase : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
_lowerCAmelCase : Union[str, Any] = output_channel
_lowerCAmelCase : Tuple = block_out_channels[i]
_lowerCAmelCase : Dict = i == len(__a) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowerCAmelCase : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
else:
_lowerCAmelCase : List[Any] = FlaxDownBlockaD(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(__a)
_lowerCAmelCase : Any = down_blocks
# mid
_lowerCAmelCase : Tuple = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
# up
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[int] = list(reversed(__a))
_lowerCAmelCase : str = list(reversed(__a))
_lowerCAmelCase : Optional[Any] = list(reversed(__a))
_lowerCAmelCase : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
_lowerCAmelCase : Dict = output_channel
_lowerCAmelCase : Dict = reversed_block_out_channels[i]
_lowerCAmelCase : Union[str, Any] = reversed_block_out_channels[min(i + 1, len(__a) - 1)]
_lowerCAmelCase : Union[str, Any] = i == len(__a) - 1
if up_block_type == "CrossAttnUpBlock2D":
_lowerCAmelCase : Tuple = FlaxCrossAttnUpBlockaD(
in_channels=__a, out_channels=__a, prev_output_channel=__a, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
else:
_lowerCAmelCase : Optional[int] = FlaxUpBlockaD(
in_channels=__a, out_channels=__a, prev_output_channel=__a, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
up_blocks.append(__a)
_lowerCAmelCase : Optional[int] = output_channel
_lowerCAmelCase : Union[str, Any] = up_blocks
# out
_lowerCAmelCase : List[str] = nn.GroupNorm(num_groups=32, epsilon=1E-5)
_lowerCAmelCase : Optional[int] = nn.Conv(
self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
def __call__( self, __a, __a, __a, __a=None, __a=None, __a = True, __a = False, ):
'''simple docstring'''
if not isinstance(__a, jnp.ndarray):
_lowerCAmelCase : Union[str, Any] = jnp.array([timesteps], dtype=jnp.intaa)
elif isinstance(__a, jnp.ndarray) and len(timesteps.shape) == 0:
_lowerCAmelCase : int = timesteps.astype(dtype=jnp.floataa)
_lowerCAmelCase : Dict = jnp.expand_dims(__a, 0)
_lowerCAmelCase : str = self.time_proj(__a)
_lowerCAmelCase : List[str] = self.time_embedding(__a)
# 2. pre-process
_lowerCAmelCase : str = jnp.transpose(__a, (0, 2, 3, 1))
_lowerCAmelCase : Any = self.conv_in(__a)
# 3. down
_lowerCAmelCase : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__a, __a):
_lowerCAmelCase , _lowerCAmelCase : Any = down_block(__a, __a, __a, deterministic=not train)
else:
_lowerCAmelCase , _lowerCAmelCase : Dict = down_block(__a, __a, deterministic=not train)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_lowerCAmelCase : List[Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
__a, __a):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_lowerCAmelCase : str = new_down_block_res_samples
# 4. mid
_lowerCAmelCase : Optional[int] = self.mid_block(__a, __a, __a, deterministic=not train)
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_lowerCAmelCase : Optional[int] = down_block_res_samples[-(self.layers_per_block + 1) :]
_lowerCAmelCase : Tuple = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__a, __a):
_lowerCAmelCase : Union[str, Any] = up_block(
__a, temb=__a, encoder_hidden_states=__a, res_hidden_states_tuple=__a, deterministic=not train, )
else:
_lowerCAmelCase : Optional[int] = up_block(__a, temb=__a, res_hidden_states_tuple=__a, deterministic=not train)
# 6. post-process
_lowerCAmelCase : Union[str, Any] = self.conv_norm_out(__a)
_lowerCAmelCase : Tuple = nn.silu(__a)
_lowerCAmelCase : Optional[int] = self.conv_out(__a)
_lowerCAmelCase : Any = jnp.transpose(__a, (0, 3, 1, 2))
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__a)
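The down/up residual handling in __call__ above is plain stack discipline: the down pass appends skip features and each up block pops the top layers_per_block + 1 entries. A toy trace with strings standing in for tensors; the push counts are simplified (in the real model the final down block has no downsampler, and the initial conv output is consumed by the last up block):

layers_per_block = 2
stack = ["conv_in"]  # the pre-processed sample is pushed first
for d in range(3):   # down pass: push residuals plus the downsample output
    stack += [f"down{d}_res{i}" for i in range(layers_per_block)] + [f"down{d}_downsample"]
for u in range(3):   # up pass: pop layers_per_block + 1 per block, in reverse
    res_samples = stack[-(layers_per_block + 1):]
    stack = stack[: -(layers_per_block + 1)]
    print(f"up block {u} consumes {res_samples}")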
| 300 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self):
        '''simple docstring'''
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self):
        '''simple docstring'''
        return str(self)
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        '''simple docstring'''
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        '''simple docstring'''
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        return self + (-another)
    def __mul__(self, another: float | Matrix) -> Matrix:
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        '''simple docstring'''
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1():
        '''simple docstring'''
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2():
        '''simple docstring'''
        import doctest
        doctest.testmod()
    test1()
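The sherman_morrison method above implements the rank-1 update identity (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u), applied to an object that already stores A^-1. A NumPy cross-check of the identity, using the same u and v as the test block above:

import numpy as np

A = np.eye(3)  # identity, so the stored inverse equals A itself
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

ainv = np.linalg.inv(A)
denom = 1.0 + (v.T @ ainv @ u).item()
sm = ainv - (ainv @ u) @ (v.T @ ainv) / denom

assert np.allclose(sm, np.linalg.inv(A + u @ v.T))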
| 300 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":

    def f(x):
        '''simple docstring'''
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
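Each math.hypot(x2 - x1, fx2 - fx1) term above is the length of one chord, so for smooth f the sum converges to the arc-length integral it approximates:

L = \int_{x_{\text{start}}}^{x_{\text{end}}} \sqrt{1 + f'(x)^2} \, dx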
| 85 |
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
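A worked example for fractional_knapsack above (numbers invented for illustration): the value/weight ratios are 6.0, 5.0 and 4.0, so the first two items are taken whole and two thirds of the third fills the remaining 20 units of capacity, giving 60 + 100 + 120 * (20 / 30) = 240.0:

assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 20 / 30])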
| 14 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
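A minimal sketch of the lazy-import idea _LazyModule implements; the structure below is illustrative, not the actual _LazyModule internals. PEP 562 lets a package define a module-level __getattr__, so the heavy submodule import only runs when a name is first accessed:

# hypothetical my_pkg/__init__.py
import importlib

_import_structure = {"modeling_unispeech": ["UniSpeechModel"]}

def __getattr__(name):
    for submodule, names in _import_structure.items():
        if name in names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")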
| 371 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = tempfile.mkdtemp()
# fmt: off
A_ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A_ = {"""unk_token""": """<unk>"""}
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase__ ) )
A_ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
A_ = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = self.get_image_processor()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A_ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
A_ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCamelCase__ , return_tensors="""np""" )
A_ = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = processor(text=UpperCamelCase__ )
A_ = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCamelCase__ )
A_ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 101 | 0 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a : List[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
| 105 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int]=1_3 ,lowercase__ : Any=7 ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[int]=True ,lowercase__ : List[str]=True ,lowercase__ : str=True ,lowercase__ : Dict=9_9 ,lowercase__ : Union[str, Any]=3_2 ,lowercase__ : List[str]=5 ,lowercase__ : int=4 ,lowercase__ : Dict=3_7 ,lowercase__ : Union[str, Any]="gelu" ,lowercase__ : str=0.1 ,lowercase__ : List[str]=0.1 ,lowercase__ : Any=5_1_2 ,lowercase__ : Optional[int]=1_6 ,lowercase__ : Optional[int]=2 ,lowercase__ : Optional[int]=0.0_2 ,lowercase__ : Dict=False ,lowercase__ : Optional[int]=True ,lowercase__ : str="None" ,lowercase__ : Optional[int]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Union[str, Any]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = relative_attention
__lowercase = position_biased_input
__lowercase = pos_att_type
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.get_config()
__lowercase = 3_0_0
return config
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ):
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Union[str, Any] ):
__lowercase = DebertaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )[0]
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )[0]
__lowercase = model(lowercase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : Tuple ,lowercase__ : int ):
__lowercase = DebertaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = DebertaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.num_labels
__lowercase = DebertaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : List[str] ):
__lowercase = DebertaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
        pass
@slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 104 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 220 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 220 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
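    # Summarize failures per log file in the Slack message body.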
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
    err = "Too many failed tests, please see the full report in the Action results."
    offset = len(err) + 10
    message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f'### {message}')
else:
    message = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
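    # Only post to Slack when TEST_TYPE is set, i.e. when running inside the CI.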
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
    date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 244 |
def __magic_name__(txt: str) -> list[str]:
    """Return every variant of txt in which exactly one alphabetic character is upper-cased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 244 | 1 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> Any:
    """Shuffle data in place by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 362 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
lowerCamelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
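            # Request GPUs; enable fp16 only when apex is available.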
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
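            # CPU-only fallback: run distributed training over two DDP CPU processes.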
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 208 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 175 | from torch import nn
# Classification head for a transformer encoder (class name reconstructed; the original was obfuscated).
class ClassificationHead(nn.Module):
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
| 175 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 355 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected height and width when providing images to DetrImageProcessor,
        assuming do_resize is True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 288 | 0 |
"""simple docstring"""
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
| 290 | """simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# The parameter subscripts below are reconstructed; the original identifiers were garbled.
def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )
| 290 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 358 | """simple docstring"""
class DisjointSet:  # class name reconstructed; the original identifier was obfuscated
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the number of items in each set and rank 1 for each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 321 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            sep_token_id=sep_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 152 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
def A ( self : Any ) -> List[str]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A ( self : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_SCREAMING_SNAKE_CASE =Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' )
return raw_image
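    # Shape sanity sketch (illustrative, mirrors the assertions in the tests
    # below): each flattened patch row has length patch_h * patch_w * channels + 2,
    # the +2 holding the patch's (row, col) position. With the defaults above:
    #
    #     16 * 16 * 3 + 2 == 770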
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # A VQA processor must refuse input that has no header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            header_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=header_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=header_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # RGBA inputs are converted to RGB, so the encoded dim uses one channel fewer
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 47 | 0 |
from importlib import import_module
from .logging import get_logger
a_ : Any = get_logger(__name__)
class _PatchedModuleObj:
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
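# Usage sketch (hedged; `fake_join` and `mod` are made up for illustration):
# patch `os.path.join` as seen from a module-like object that did `import os`.
#
#     import os, types
#
#     def fake_join(*parts):
#         return "/".join(parts)
#
#     mod = types.ModuleType("mod")
#     mod.os = os  # as if `mod` had done `import os`
#     with patch_submodule(mod, "os.path.join", fake_join):
#         assert mod.os.path.join("a", "b") == "a/b"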
| 104 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : List[str] = [1, 3, 0, 5, 8, 5]
a_ : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
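# Hand trace of the run above (worked by hand, not captured program output):
# activity 0 is taken first (finish 2), then 1 (start 3 >= 2), then 3
# (start 5 >= 4), then 4 (start 8 >= 7), so the script prints "0,1,3,4,".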
| 104 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42_384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
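# Hedged usage sketch (checkpoint name taken from the slow tests above; greedy
# decoding shown for brevity, so the text will differ from the beam output):
#
#     from transformers import BioGptForCausalLM, BioGptTokenizer
#
#     tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#     model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#     inputs = tokenizer("COVID-19 is", return_tensors="pt")
#     out = model.generate(**inputs, max_length=40)
#     print(tokenizer.decode(out[0], skip_special_tokens=True))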
| 15 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that `n` can be placed at (row, column) without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of an empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 | 0 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
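# Round-trip sketch (hand-checkable; the key [[2, 5], [1, 6]] has determinant
# 7, which is coprime with 36, so check_determinant accepts it):
#
#     key = numpy.array([[2, 5], [1, 6]])
#     hc = HillCipher(key)
#     ciphertext = hc.encrypt("TESTING")          # padded to an even length first
#     assert hc.decrypt(ciphertext) == hc.process_text("TESTING")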
| 363 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
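# Behavior sketch of this lazy-module pattern (intended semantics, not a new
# API): submodules are imported only on first attribute access, so e.g.
#
#     from transformers.models.gpt_neox import GPTNeoXConfig
#
# stays cheap, and torch is only pulled in once a modeling class such as
# GPTNeoXModel is actually touched.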
| 141 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
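# Hedged usage sketch (checkpoint and preset names are the ones used above):
#
#     from transformers import BarkProcessor
#
#     processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#     inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#     # inputs["input_ids"] feeds a Bark model; inputs["history_prompt"] holds
#     # the semantic/coarse/fine prompt arrays for the chosen speaker.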
| 75 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 75 | 1 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
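# Invocation sketch (script name and paths are placeholders, not real files):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model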
| 359 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
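# Invocation sketch (script name and model identifiers are illustrative, not
# verified checkpoints):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-consolidated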
| 170 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
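# Hedged forward-pass sketch ("xlm-roberta-base" is only for shape-checking;
# real M-CLIP weights ship with their own checkpoint). Note transformerDimSize
# must match the text model's hidden size, so plain XLM-R base (hidden 768)
# needs transformerDimSize=768 rather than the default 1024:
#
#     from transformers import AutoTokenizer
#
#     config = MCLIPConfig(transformerDimSize=768, imageDimSize=768)
#     model = MultilingualCLIP(config)
#     tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
#     batch = tok(["a photo of a cat"], return_tensors="pt")
#     projected, token_embs = model(batch["input_ids"], batch["attention_mask"])
#     # projected: (1, imageDimSize) mean-pooled text embedding in image space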
| 50 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
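# Hedged usage sketch (checkpoint name is illustrative):
#
#     from PIL import Image
#     from transformers import ChineseCLIPProcessor
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#     # inputs carries input_ids/attention_mask plus pixel_values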
| 77 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
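# What attribute_map buys you (a sketch; relies on PretrainedConfig's aliasing):
#
#     config = DistilBertConfig()
#     assert config.hidden_size == config.dim          # read through the alias
#     config.num_hidden_layers = 3                     # writes through to n_layers
#     assert config.n_layers == 3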
| 77 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
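    # Worked check of inputs_to_logits_ratio (hand arithmetic, matching the
    # default conv_stride): 5 * 2**6 == 320, so 16_000 audio samples map to
    # roughly 50 encoder frames.
    #
    #     config = UniSpeechConfig()
    #     assert config.inputs_to_logits_ratio == 320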
| 82 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
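# Minimal sketch of what the lazy-module pattern above achieves (illustrative
# only; the real _LazyModule in transformers is more involved). Replacing the
# entry in sys.modules defers the heavy torch imports until an attribute such
# as MvpModel is first accessed:
#
#   import importlib, types
#   class LazyDemo(types.ModuleType):
#       def __init__(self, name, structure):
#           super().__init__(name)
#           self._attr_to_module = {attr: mod for mod, attrs in structure.items() for attr in attrs}
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#           return getattr(module, attr)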
| 82 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class lowerCamelCase__ ( unittest.TestCase ):
    @cached_property
    def resolver( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
    @slow
    def test_resolver( self ):
        self.resolver.convert_models(["""heb-eng"""] )
    @slow
    def test_model_card( self ):
        content , mmeta = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 234 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
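# Illustrative checks of the wildcard rules above (added; comments only):
#   should_ignore("text_encoder_prenet.encoder_prenet.0.weight", ["text_encoder_prenet.*"])  # True: trailing glob
#   should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])      # True: infix glob
#   should_ignore("encoder.layers.3.fc1.weight", ["decoder.version"])                        # False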
def recursively_load_weights( fairseq_dict , hf_model , task ):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["""model"""] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
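# Hedged invocation sketch (added; the script name and paths are placeholders,
# not from the original snippet):
#
#   python convert_speecht5_checkpoint.py \
#       --task s2t \
#       --checkpoint_path /path/to/speecht5_asr.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_s2t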
| 234 | 1 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817E-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8 # unit of c : m * s^-1
def casimir_force( force: float , area: float , distance: float ) -> dict[str, float]:
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if force < 0:
        raise ValueError('''Magnitude of force can not be negative''' )
    if distance < 0:
        raise ValueError('''Distance can not be negative''' )
    if area < 0:
        raise ValueError('''Area can not be negative''' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('''One and only one argument must be 0''' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
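    # Worked example (added; assumes SI units as in the constants above): two
    # plates of area 1e-4 m^2 separated by 1e-6 m experience
    # F = ħ·c·π²·A / (240·d⁴) ≈ 1.30e-7 N.
    print(casimir_force(force=0, area=1e-4, distance=1e-6))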
| 49 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
a_ = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class UpperCAmelCase_ ( PretrainedConfig ):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size = 25_00_02 , hidden_size = 7_68 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 30_72 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_14 , initializer_range = 0.0_2 , pad_token_id = 1 , layer_norm_eps = 1E-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
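# Hedged usage sketch (added): PretrainedConfig's attribute_map remaps
# attribute access, so for the config class above:
#
#   config = UpperCAmelCase_(hidden_size=7_68, classifier_dropout=0.1)
#   config.dropout       # resolves to classifier_dropout via attribute_map
#   config.num_classes   # resolves to num_labels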
| 249 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''WhisperFeatureExtractor'''
    tokenizer_class = '''WhisperTokenizer'''
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def get_prompt_ids( self , text , return_tensors="np" ):
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
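# Hedged usage sketch (added; the model id and the raw_speech array are
# placeholders):
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   batch = processor(audio=raw_speech, sampling_rate=16_000,
#                     text="a transcript", return_tensors="pt")
#   # __call__ above merges the feature extractor's "input_features" with the
#   # tokenized transcript under "labels"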
| 68 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int] , index1: int , index2: int , direction: int ):
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1] , array[index2] = array[index2] , array[index1]
def bitonic_merge(array: list[int] , low: int , length: int , direction: int ):
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort(array: list[int] , low: int , length: int , direction: int ):
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
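    # Hedged note (added): bitonic sort assumes the input length is a power of
    # two; arbitrary lengths need padding with sentinel values first.
    demo = [3, 1, 4, 2]  # length 4, a power of two
    bitonic_sort(demo, 0, len(demo), 1)
    print('Power-of-two demo in ascending order is: ', end='')
    print(*demo, sep=', ')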
| 68 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask( masked_input , model , tokenizer , topk=5 ):
    """simple docstring"""
    assert masked_input.count('''<mask>''' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 ) # Batch size 1
    logits = model(input_ids )[0] # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
        if " {0}".format(predicted_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("""camembert-base""")
model = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
masked_input = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 210 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 210 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : str = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = "xlm-roberta-xl"
    def __init__( self, vocab_size=2_5_0_8_8_0, hidden_size=2_5_6_0, num_hidden_layers=3_6, num_attention_heads=3_2, intermediate_size=1_0_2_4_0, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_4, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig (OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
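# Hedged usage sketch (added; construction details assumed from the
# transformers ONNX export API):
#
#   onnx_config = XLMRobertaXLOnnxConfig(model_config)
#   dict(onnx_config.inputs)
#   # -> {"input_ids": {0: "batch", 1: "sequence"},
#   #     "attention_mask": {0: "batch", 1: "sequence"}}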
| 369 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    """simple docstring"""
    def __init__( self, parent, batch_size=1_3, image_size=3_0, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=1_0, initializer_range=0.02, scope=None, ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model( self, config, pixel_values, labels ):
        """simple docstring"""
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self, config, pixel_values, labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
        print(f"Labels: {labels}" )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMSN does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest (unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
| 191 | 0 |