"""simple docstring"""
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ (__A ):
__magic_name__ = '''SpeechT5FeatureExtractor'''
__magic_name__ = '''SpeechT5Tokenizer'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ) -> Tuple:
super().__init__(__a , __a )
def __call__( self : int , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("audio" , __a )
UpperCAmelCase_ : Dict = kwargs.pop("text" , __a )
UpperCAmelCase_ : Dict = kwargs.pop("text_target" , __a )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("audio_target" , __a )
UpperCAmelCase_ : Any = kwargs.pop("sampling_rate" , __a )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
UpperCAmelCase_ : Tuple = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a )
elif text is not None:
UpperCAmelCase_ : List[Any] = self.tokenizer(__a , **__a )
else:
UpperCAmelCase_ : Dict = None
if audio_target is not None:
UpperCAmelCase_ : Union[str, Any] = self.feature_extractor(audio_target=__a , *__a , sampling_rate=__a , **__a )
UpperCAmelCase_ : Optional[int] = targets["input_values"]
elif text_target is not None:
UpperCAmelCase_ : List[Any] = self.tokenizer(__a , **__a )
UpperCAmelCase_ : Union[str, Any] = targets["input_ids"]
else:
UpperCAmelCase_ : Union[str, Any] = None
if inputs is None:
return targets
if targets is not None:
UpperCAmelCase_ : Any = labels
UpperCAmelCase_ : List[Any] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCAmelCase_ : Tuple = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = kwargs.pop("input_values" , __a )
UpperCAmelCase_ : int = kwargs.pop("input_ids" , __a )
UpperCAmelCase_ : List[Any] = kwargs.pop("labels" , __a )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
UpperCAmelCase_ : List[str] = self.feature_extractor.pad(__a , *__a , **__a )
elif input_ids is not None:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.pad(__a , **__a )
else:
UpperCAmelCase_ : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(__a , __a ) and "input_ids" in labels[0]):
UpperCAmelCase_ : str = self.tokenizer.pad(__a , **__a )
UpperCAmelCase_ : str = targets["input_ids"]
else:
UpperCAmelCase_ : Union[str, Any] = self.feature_extractor.feature_size
UpperCAmelCase_ : str = self.feature_extractor.num_mel_bins
UpperCAmelCase_ : str = self.feature_extractor.pad(__a , *__a , **__a )
UpperCAmelCase_ : List[Any] = feature_size_hack
UpperCAmelCase_ : str = targets["input_values"]
else:
UpperCAmelCase_ : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
UpperCAmelCase_ : str = labels
UpperCAmelCase_ : List[str] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCAmelCase_ : Any = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[int] ) -> str:
return self.tokenizer.batch_decode(*__a , **__a )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Union[str, Any] ) -> int:
return self.tokenizer.decode(*__a , **__a )
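# Usage sketch (illustrative, not part of the original module). The checkpoint
# name is an assumption for illustration; any SpeechT5 repo that ships both a
# feature extractor and a tokenizer works the same way.
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")

# `text` is routed to the tokenizer; an `audio_target` would be routed to the
# feature extractor and merged back as `labels`/`decoder_attention_mask`.
inputs = processor(text="Hello world", return_tensors="pt")
print(inputs["input_ids"].shape)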
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


# We verify the conversion on a COCO image of two cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights into our BiT structure.
    """

    # define default BiT configuration
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model and rename keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor matching the timm transforms
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
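# Usage sketch (illustrative, not part of the original script): a converted
# dump folder loads back through the regular `from_pretrained` machinery. The
# folder name below is a hypothetical value of --pytorch_dump_folder_path.
from transformers import BitForImageClassification, BitImageProcessor

dump_folder = "bit-converted"  # hypothetical output directory
model = BitForImageClassification.from_pretrained(dump_folder)
processor = BitImageProcessor.from_pretrained(dump_folder)

pixel_values = processor(prepare_img(), return_tensors="pt").pixel_values
predicted_id = model(pixel_values).logits.argmax(-1).item()
print(model.config.id2label[predicted_id])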
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed, in m/s, of a gas molecule at the given
    temperature (in K) and molar mass (in kg/mol)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: the molar mass must be in kg/mol, so nitrogen (N2) is 0.028 kg/mol
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")  # ~517 m/s
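    # Worked example (illustrative): with the molar mass expressed in kg/mol,
    # oxygen (O2, 0.032 kg/mol) at 300 K comes out around 484 m/s.
    vrms_oxygen = rms_speed_of_molecule(300, 0.032)
    print(f"Vrms of Oxygen gas at 300 K is {vrms_oxygen} m/s")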
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
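# Usage sketch (illustrative, not part of the original file): with the
# `_LazyModule` installed in `sys.modules`, attribute access drives the actual
# submodule imports, so for example
#
#     from transformers import ConvNextConfig   # resolves configuration_convnext only
#     from transformers import ConvNextModel    # lazily pulls in modeling_convnext (and torch)
#
# and a missing optional backend only raises when the guarded name is accessed.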
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
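# Illustrative doctest (not part of the original script): at 16 kHz, a 30 s
# clip is cropped to `max_length` seconds, while shorter clips pass through:
#
#   >>> wav = np.zeros(16_000 * 30)
#   >>> len(random_subsample(wav, max_length=20.0))
#   320000
#   >>> len(random_subsample(np.zeros(16_000), max_length=20.0))
#   16000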
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
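# Invocation sketch (illustrative; the dataset and hyperparameters below are
# assumptions, but each flag maps to a dataclass field above):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb \
#       --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --max_length_seconds 1 \
#       --learning_rate 3e-5 \
#       --num_train_epochs 5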
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
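# Usage sketch (illustrative, not part of the original tests): the packaged
# module exercised above backs `Dataset.from_spark`, which materializes a
# Spark DataFrame as a Hugging Face dataset, one example per row.
import pyspark

from datasets import Dataset

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(10)
ds = Dataset.from_spark(df)
assert ds[0] == {"id": 0}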
import inspect
import unittest

import numpy as np

from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because BeiT's forward signature differs from text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because BeiT expects pixel_values instead of input_ids
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
"""
Project Euler Problem 21: Amicable Numbers

Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n). If d(a) = b and d(b) = a, where a != b, then a
and b are an amicable pair and each of a and b are called amicable numbers.

Evaluate the sum of all the amicable numbers under 10000.
"""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10_000) -> int:
    """Returns the sum of all the amicable numbers under n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
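    # Worked example (illustrative): 220 and 284 form the smallest amicable
    # pair, so both are counted by `solution` once the limit exceeds 284.
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220
    assert solution(300) == 220 + 284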
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches a training function from a notebook, using several processes when the current environment allows it
    (for instance on a TPU with multiple cores, or a multi-GPU machine).
    """
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU, for debugging purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
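# Usage sketch (illustrative, not part of the original module): in a notebook,
# the training function is defined in a cell and handed to the launcher; the
# process count and precision below are assumptions.
#
#   def training_loop():
#       # build the `Accelerator`, model and dataloaders *inside* this
#       # function, as the checks above require
#       ...
#
#   notebook_launcher(training_loop, args=(), num_processes=2, mixed_precision="fp16")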
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of
    an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """
        Assign labels to the image(s) passed as inputs.
        """
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
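# Usage sketch (illustrative, not part of the original module): this class is
# what `pipeline("image-classification")` instantiates; the image URL is an
# assumption for illustration.
from transformers import pipeline

classifier = pipeline("image-classification")
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
print(predictions)  # [{"score": ..., "label": ...}, ...]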
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
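# Usage sketch (illustrative, not part of the original tests): GLPN rounds
# each spatial dimension *down* to a multiple of `size_divisor`, so a 65x33
# image becomes 64x32.
from PIL import Image

from transformers import GLPNImageProcessor

image_processor = GLPNImageProcessor(do_resize=True, size_divisor=32)
image = Image.new("RGB", (65, 33))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 32, 64])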
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Union[str, Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 4 , A : int = 32 , A : int = 32 , A : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__snake_case: Any = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
# pass init params to Decoder
__snake_case: int = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , norm_num_groups=A , act_fn=A , )
__snake_case: Dict = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__snake_case: int = nn.Convad(A , A , 1 )
__snake_case: List[str] = False
__snake_case: Optional[int] = False
# only relevant if vae tiling is enabled
__snake_case: Any = self.config.sample_size
__snake_case: int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__snake_case: Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__snake_case: Optional[int] = 0.25
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value
    def enable_tiling( self , use_tiling: bool = True ):
        self.use_tiling = use_tiling
    def disable_tiling( self ):
        self.enable_tiling(False )
    def enable_slicing( self ):
        self.use_slicing = True
    def disable_slicing( self ):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ) -> Dict[str, AttentionProcessor]:
        processors = {}
        def fn_recursive_add_processors(name: str , module: torch.nn.Module , processors: Dict[str, AttentionProcessor] ):
            if hasattr(module , """set_processor""" ):
                processors[f'''{name}.processor'''] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''' , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(processor )} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
        def fn_recursive_attn_processor(name: str , module: torch.nn.Module , processor ):
            if hasattr(module , """set_processor""" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''' , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ):
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    @apply_forward_hook
    def decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a , b , blend_extent ):
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a , b , blend_extent ):
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
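    # Hedged illustration of blend_v/blend_h above: over `blend_extent` rows (or
    # columns), the output ramps linearly from tile `a` to tile `b`. For example,
    # with blend_extent=4 the weights on `b` are 0.0, 0.25, 0.5, 0.75 for
    # y = 0..3, so adjacent tiles crossfade instead of producing a visible seam.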
    def tiled_encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def tiled_decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , sample_posterior: bool = False , return_dict: bool = True , generator: Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
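    # Hedged usage sketch (shapes illustrative; tiling only activates once the
    # input exceeds tile_sample_min_size):
    #   vae = AutoencoderKL(sample_size=512)
    #   vae.enable_tiling()
    #   x = torch.randn(1, 3, 1024, 1024)
    #   latents = vae.encode(x).latent_dist.sample()
    #   reconstruction = vae.decode(latents).sample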
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , max_length , vocab_size , d_model , dropout_rate , num_layers , num_heads , d_kv , d_ff , feed_forward_proj , is_decoder = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
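# Hedged usage sketch for the notes encoder above (keyword names follow the
# T5Config fields wired in __init__; treat them as assumptions, not a verified
# public signature):
#   enc = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#       dropout_rate=0.1, num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu")
#   tokens = torch.randint(0, 1536, (1, 2048))
#   hidden, mask = enc(tokens, tokens > 0)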
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max: bool = True , max_x: float = math.inf , min_x: float = -math.inf , max_y: float = math.inf , min_y: float = -math.inf , visualization: bool = False , start_temperate: float = 100 , rate_of_decrease: float = 0.01 , threshold_temp: float = 1 , ) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # until we find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # pick a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
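# Worked example of the acceptance rule above (illustrative numbers): a
# worsening move with change = -2.0 is kept with probability e**(-2.0 / 100)
# ~= 0.98 while current_temp is 100, but only e**(-2.0 / 1) ~= 0.135 once the
# temperature has decayed to 1, so exploration narrows as the run cools.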
if __name__ == "__main__":
    def test_fa(x , y ):
        """simple docstring"""
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    def test_fa(x , y ):
        """simple docstring"""
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('''DataClass''', Any)
lowerCAmelCase__ = NewType('''DataClassType''', Any)
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = {str(_SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda _SCREAMING_SNAKE_CASE : str_to_choice.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( *,
_SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = dataclasses.MISSING , _SCREAMING_SNAKE_CASE = dataclasses.MISSING , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCamelCase = {}
if aliases is not None:
UpperCamelCase = aliases
if help is not None:
UpperCamelCase = help
return dataclasses.field(metadata=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , default_factory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class HfArgumentParser(ArgumentParser ):
    dataclass_types: Iterable[DataClassType]
    def __init__(self , dataclass_types , **kwargs ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field(parser , field ):
UpperCamelCase = F"--{field.name}"
UpperCamelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __a ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
UpperCamelCase = kwargs.pop("aliases" , [] )
if isinstance(__a , __a ):
UpperCamelCase = [aliases]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__a , "UnionType" ) and isinstance(__a , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__a ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F" Problem encountered in field '{field.name}'." )
if type(__a ) not in field.type.__args__:
# filter `str` in Union
UpperCamelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCamelCase = (
field.type.__args__[0] if isinstance(__a , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCamelCase = {}
if origin_type is Literal or (isinstance(field.type , __a ) and issubclass(field.type , __a )):
if origin_type is Literal:
UpperCamelCase = field.type.__args__
else:
UpperCamelCase = [x.value for x in field.type]
UpperCamelCase = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
else:
UpperCamelCase = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCamelCase = copy(__a )
# Hack because type=bool in argparse does not behave as we want.
UpperCamelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCamelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCamelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCamelCase = "?"
# This is the value that will get picked if we do --field_name (without value)
UpperCamelCase = True
elif isclass(__a ) and issubclass(__a , __a ):
UpperCamelCase = field.type.__args__[0]
UpperCamelCase = "+"
if field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCamelCase = True
else:
UpperCamelCase = field.type
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
else:
UpperCamelCase = True
parser.add_argument(__a , *__a , **__a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCamelCase = False
parser.add_argument(F"--no_{field.name}" , action="store_false" , dest=field.name , **__a )
    def _add_dataclass_arguments(self , dtype ):
if hasattr(__a , "_argument_group_name" ):
UpperCamelCase = self.add_argument_group(dtype._argument_group_name )
else:
UpperCamelCase = self
try:
UpperCamelCase = get_type_hints(__a )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__a ):
UpperCamelCase = ".".join(map(__a , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__a ):
if not field.init:
continue
UpperCamelCase = type_hints[field.name]
self._parse_dataclass_field(__a , __a )
    def parse_args_into_dataclasses(self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCamelCase = []
if args_filename:
args_files.append(Path(__a ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCamelCase = ArgumentParser()
args_file_parser.add_argument(__a , type=__a , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCamelCase , UpperCamelCase = args_file_parser.parse_known_args(args=__a )
UpperCamelCase = vars(__a ).get(args_file_flag.lstrip("-" ) , __a )
if cmd_args_file_paths:
args_files.extend([Path(__a ) for p in cmd_args_file_paths] )
UpperCamelCase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCamelCase = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCamelCase , UpperCamelCase = self.parse_known_args(args=__a )
UpperCamelCase = []
for dtype in self.dataclass_types:
UpperCamelCase = {f.name for f in dataclasses.fields(__a ) if f.init}
UpperCamelCase = {k: v for k, v in vars(__a ).items() if k in keys}
for k in keys:
delattr(__a , __a )
UpperCamelCase = dtype(**__a )
outputs.append(__a )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__a )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
    def parse_dict(self , args , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}" )
        return tuple(outputs )
    def parse_json_file(self , json_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        with open(Path(json_file ) , encoding="utf-8" ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file(self , yaml_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
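# Hedged usage sketch (parse_args_into_dataclasses is the upstream public API;
# the dataclass below is purely illustrative):
#   @dataclasses.dataclass
#   class TrainArgs:
#       learning_rate: float = 3e-4
#       do_eval: bool = False
#
#   parser = HfArgumentParser([TrainArgs])
#   (train_args,) = parser.parse_args_into_dataclasses()
#   # `python train.py --learning_rate 1e-4 --do_eval` then yields
#   # TrainArgs(learning_rate=1e-4, do_eval=True)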
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file , repo_path="shi-labs/oneformer_demo" ):
    """simple docstring"""
    with open(hf_hub_download(repo_path , class_info_file , repo_type='dataset' ) , 'r' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info['name'] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=255 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1] , image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_lowercase : List[Any] = image_processing_class
    def setUp( self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processing_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_reduce_labels' ) )
def UpperCAmelCase ( self : str ) -> List[Any]:
pass
def UpperCAmelCase ( self : int ) -> List[Any]:
__lowerCAmelCase: Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase: Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
__lowerCAmelCase: str = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__lowerCAmelCase: str = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase: Optional[Any] = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
__lowerCAmelCase: List[str] = image_processor(
UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase: List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
__lowerCAmelCase: List[str] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__lowerCAmelCase: List[str] = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase: int = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = image_processor(
UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
__lowerCAmelCase: List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase: List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowerCAmelCase: Any = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__lowerCAmelCase: Tuple = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase: Tuple = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
__lowerCAmelCase: Any = image_processor(
UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs( self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ['semantic'] * len(image_inputs ) , annotations , return_tensors='pt' , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
        return inputs
def UpperCAmelCase ( self : Any ) -> List[str]:
pass
    def test_call_with_segmentation_maps( self ):
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='pil' )
        common(is_instance_map=True , segmentation_type='pil' )
    def test_binary_mask_to_rle( self ):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
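    # Hedged note on the encoding checked above: this RLE emits (start, length)
    # pairs for each foreground run over the flattened mask, so rle[0] == 21
    # means the first run of ones begins at (1-indexed) pixel 21 -- i.e. after
    # 20 background pixels -- and rle[1] == 45 means that run spans 45 pixels
    # (the 30 ones in row 0 followed contiguously by the 15 in row 1).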
    def test_post_process_semantic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) , list )
            self.assertEqual(
                el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) , list )
            self.assertEqual(
                el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def create_rename_keys( encoder_config , decoder_config ):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , encoder_config ):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( checkpoint_url ):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    encoder_config = ViTConfig(image_size=384 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" , check_hash=True )["model"]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("decoder" ) and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="pt" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Union[str, Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : str = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
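# Hedged CLI sketch (the script filename is illustrative; flags are the ones
# registered with argparse above):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten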
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Tuple = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
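# Hedged usage note: the mapping returned by `inputs` above matches the shape of
# the `dynamic_axes` argument of torch.onnx.export, e.g.
#   {"input_ids": {0: "batch", 1: "sequence"},
#    "attention_mask": {0: "batch", 1: "sequence"},
#    "token_type_ids": {0: "batch", 1: "sequence"}}
# with axis 1 renamed to "choice" (plus a trailing "sequence" axis 2) only for
# the multiple-choice task.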
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList:
    '''simple docstring'''
    def __init__( self , size: int ) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex: int ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
    @property
    def size( self ) -> int:
        return self._size
    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1." )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size)." )
        self._graph[from_vertex].append(Edge(destination_vertex=to_vertex , weight=weight ) )
    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int | None:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex." )
        return distances[finish_vertex]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
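# Hedged usage sketch of the 0-1 BFS above:
#   graph = AdjacencyList(3)
#   graph.add_edge(0, 1, 0)
#   graph.add_edge(1, 2, 1)
#   graph.get_shortest_path(0, 2)  # -> 1 (the deque settles 0-weight edges first)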
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset ):
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
    def __iter__( self ):
        '''simple docstring'''
        for element in self.data:
            yield element
def create_accelerator( even_batches=True ):
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader( accelerator , dataset_size , batch_size , iterable=False ):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> Optional[Any]:
__lowerCAmelCase: str = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
__lowerCAmelCase: str = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
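# Worked example of the assertions above: with even_batches=False, a 3-sample
# dataset at batch_size=1 across two processes yields batches [1, 1] on rank 0
# and [1] on rank 1; with the default even_batches=True, rank 1 would instead
# see [1, 1] because the final batch is padded by repeating a sample.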
def a__ ( ) -> Optional[int]:
__lowerCAmelCase: Tuple = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = torch.nn.Linear(1 , 1 )
__lowerCAmelCase: Dict = accelerator.prepare(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
__lowerCAmelCase: Optional[Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = ddp_model(batch[0].float() )
__lowerCAmelCase: int = output.sum()
loss.backward()
batch_idxs.append(__SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __SCREAMING_SNAKE_CASE ) -> List[str]:
with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __SCREAMING_SNAKE_CASE )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = True
__lowerCAmelCase: Optional[int] = False
__lowerCAmelCase: List[str] = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = torch.nn.Linear(1 , 1 )
__lowerCAmelCase: List[str] = accelerator.prepare(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
__lowerCAmelCase: int = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = train_dl.batch_sampler.even_batches
__lowerCAmelCase: str = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> List[Any]:
__lowerCAmelCase: List[Any] = True
__lowerCAmelCase: Tuple = False
__lowerCAmelCase: Union[str, Any] = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = torch.nn.Linear(1 , 1 )
__lowerCAmelCase: Union[str, Any] = accelerator.prepare(__SCREAMING_SNAKE_CASE )
create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Dict = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[int]:
__lowerCAmelCase: Any = create_accelerator()
__lowerCAmelCase: str = torch.nn.Linear(1 , 1 )
__lowerCAmelCase: Tuple = accelerator.prepare(__SCREAMING_SNAKE_CASE )
create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=__SCREAMING_SNAKE_CASE )
with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ):
pass
assert issubclass(w[-1].category , __SCREAMING_SNAKE_CASE )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> List[Any]:
__lowerCAmelCase: str = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
__lowerCAmelCase: Tuple = accelerator.state.distributed_type
__lowerCAmelCase: Dict = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = original_state
if __name__ == "__main__":
main()
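
# --- Editor's note (assumption: illustrative only, not part of the original script) ---
# The asserts above expect exactly two processes, so this script is meant to be run
# through the Accelerate launcher rather than plain `python`, e.g.:
#
#   accelerate launch --num_processes 2 <this_file>.py
#
# From a notebook, `accelerate.notebook_launcher` provides an in-Python equivalent:
def _example_launch():  # pragma: no cover
    from accelerate import notebook_launcher

    notebook_launcher(main, num_processes=2)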
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class snake_case :
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : str=1_3 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : str=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : List[str]=3_7 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[int]=None , )-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = parent
__lowerCAmelCase: Optional[int] = batch_size
__lowerCAmelCase: int = seq_length
__lowerCAmelCase: Any = is_training
__lowerCAmelCase: List[Any] = use_input_mask
__lowerCAmelCase: Any = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: Union[str, Any] = vocab_size
__lowerCAmelCase: Union[str, Any] = hidden_size
__lowerCAmelCase: int = num_hidden_layers
__lowerCAmelCase: List[Any] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Optional[Any] = hidden_act
__lowerCAmelCase: Optional[Any] = hidden_dropout_prob
__lowerCAmelCase: Optional[int] = attention_probs_dropout_prob
__lowerCAmelCase: Any = max_position_embeddings
__lowerCAmelCase: Optional[int] = type_vocab_size
__lowerCAmelCase: str = type_sequence_label_size
__lowerCAmelCase: int = initializer_range
__lowerCAmelCase: Dict = num_labels
__lowerCAmelCase: Dict = num_choices
__lowerCAmelCase: str = scope
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase: List[Any] = None
if self.use_input_mask:
__lowerCAmelCase: int = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase: Dict = None
if self.use_token_type_ids:
__lowerCAmelCase: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__lowerCAmelCase: str = None
__lowerCAmelCase: Any = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : str)-> Dict:
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: List[Any] = OpenLlamaModel(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)
__lowerCAmelCase: int = model(UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , )-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = True
__lowerCAmelCase: Any = OpenLlamaModel(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: List[Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
__lowerCAmelCase: Optional[int] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
__lowerCAmelCase: Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , )-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: str = OpenLlamaForCausalLM(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , )-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = True
__lowerCAmelCase: Dict = True
__lowerCAmelCase: Union[str, Any] = OpenLlamaForCausalLM(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
# first forward pass
__lowerCAmelCase: Optional[int] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
__lowerCAmelCase: Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase: Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size)
__lowerCAmelCase: Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__lowerCAmelCase: str = torch.cat([input_ids, next_tokens] , dim=-1)
__lowerCAmelCase: List[str] = torch.cat([input_mask, next_mask] , dim=-1)
__lowerCAmelCase: Union[str, Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0]
__lowerCAmelCase: List[str] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0]
# select random slice
__lowerCAmelCase: List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__lowerCAmelCase: List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase: Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3))
def lowercase_ ( self : Tuple)-> str:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): List[Any] = config_and_inputs
__lowerCAmelCase: Any = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[int] = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
def lowercase_ ( self : Dict)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: int = OpenLlamaModelTester(self)
__lowerCAmelCase: Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7)
def lowercase_ ( self : List[str])-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__)
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: Union[str, Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase__)
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Dict = 3
__lowerCAmelCase: Optional[Any] = input_dict["input_ids"]
__lowerCAmelCase: Optional[Any] = input_ids.ne(1).to(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__lowerCAmelCase: Dict = OpenLlamaForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowercase_ ( self : Dict)-> Tuple:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: int = 3
__lowerCAmelCase: Dict = "single_label_classification"
__lowerCAmelCase: str = input_dict["input_ids"]
__lowerCAmelCase: Tuple = input_ids.ne(1).to(UpperCamelCase__)
__lowerCAmelCase: Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__lowerCAmelCase: List[Any] = OpenLlamaForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Tuple = 3
__lowerCAmelCase: Any = "multi_label_classification"
__lowerCAmelCase: str = input_dict["input_ids"]
__lowerCAmelCase: Optional[int] = input_ids.ne(1).to(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__lowerCAmelCase: Dict = OpenLlamaForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)])
def lowercase_ ( self : Any , UpperCamelCase__ : List[str])-> Dict:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Any = ids_tensor([1, 1_0] , config.vocab_size)
__lowerCAmelCase: Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: List[Any] = OpenLlamaModel(UpperCamelCase__)
original_model.to(UpperCamelCase__)
original_model.eval()
__lowerCAmelCase: int = original_model(UpperCamelCase__).last_hidden_state
__lowerCAmelCase: str = original_model(UpperCamelCase__).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: Dict = {"type": scaling_type, "factor": 10.0}
__lowerCAmelCase: List[str] = OpenLlamaModel(UpperCamelCase__)
scaled_model.to(UpperCamelCase__)
scaled_model.eval()
__lowerCAmelCase: Dict = scaled_model(UpperCamelCase__).last_hidden_state
__lowerCAmelCase: Any = scaled_model(UpperCamelCase__).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
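
# --- Editor's example (assumption: illustrative only, not part of the test suite) ---
# The same tiny configuration the tester uses, instantiated directly. Open-Llama has
# since been deprecated in transformers, so this relies on a version that still ships it.
def _example_tiny_model():  # pragma: no cover
    config = OpenLlamaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
    )
    return OpenLlamaModel(config)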
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch

from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so the slow training test below does not pay the download cost.
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1


class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Any = tokenizer('This is me' , return_tensors='pt' )
lowerCAmelCase__ :Dict = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCAmelCase__ :Optional[Any] = model.generate(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCAmelCase__ :Union[str, Any] = model_reloaded.generate(**__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__UpperCAmelCase ):
model.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = model.reverse_bettertransformer()
model.save_pretrained(__UpperCAmelCase )
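
# --- Editor's example (assumption: illustrative only, not part of the test file) ---
# The round-trip under test, in isolation (requires the `optimum` package):
def _example_roundtrip():  # pragma: no cover
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to_bettertransformer()       # swap in fused BetterTransformer kernels
    model = model.reverse_bettertransformer()  # restore canonical modules before saving
    model.save_pretrained("t5-roundtrip")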
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
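
# --- Editor's example (assumption: illustrative only, not part of the test file) ---
# Outside these dummy fixtures, a retriever is usually loaded from the Hub;
# `use_dummy_dataset=True` keeps the wiki_dpr download small:
def _example_pretrained_retriever():  # pragma: no cover
    retriever = RagRetriever.from_pretrained(
        "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
    )
    return retriever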
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips these directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """
    Implements the rectified linear unit (ReLU) activation function f(x) = max(0, x).

    >>> relu([-1, 0, 5])
    array([0, 0, 5])
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yields the Fibonacci sequence 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Returns the 1-based index of the first Fibonacci number with n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_A = logging.get_logger(__name__)
class _lowercase ( enum.Enum ):
lowercase_ = 0
lowercase_ = 1
@add_end_docstrings(__UpperCAmelCase )
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'generated'
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Union[str, Any]:
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _UpperCamelCase ( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ) -> Optional[Any]:
lowerCamelCase : Tuple = {}
if truncation is not None:
lowerCamelCase : Tuple = truncation
lowerCamelCase : Union[str, Any] = generate_kwargs
lowerCamelCase : List[str] = {}
if return_tensors is not None and return_type is None:
lowerCamelCase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowerCamelCase : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
lowerCamelCase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCamelCase : int = self.tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
lowerCamelCase : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> List[str]:
return True
def _UpperCamelCase ( self , *UpperCAmelCase_ , UpperCAmelCase_ ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , UpperCAmelCase_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
lowerCamelCase : Any = ([prefix + arg for arg in args[0]],)
lowerCamelCase : Tuple = True
elif isinstance(args[0] , UpperCAmelCase_ ):
lowerCamelCase : Optional[int] = (prefix + args[0],)
lowerCamelCase : Tuple = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowerCamelCase : Optional[Any] = self.tokenizer(*UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Optional[Any]:
lowerCamelCase : List[Any] = super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_ )
if (
isinstance(args[0] , UpperCAmelCase_ )
and all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for el in args[0] )
and all(len(UpperCAmelCase_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase_ ) -> Dict:
lowerCamelCase : List[Any] = self._parse_and_tokenize(UpperCAmelCase_ , truncation=UpperCAmelCase_ , **UpperCAmelCase_ )
return inputs
def _UpperCamelCase ( self , UpperCAmelCase_ , **UpperCAmelCase_ ) -> Union[str, Any]:
if self.framework == "pt":
lowerCamelCase , lowerCamelCase : Tuple = model_inputs['input_ids'].shape
elif self.framework == "tf":
lowerCamelCase , lowerCamelCase : Any = tf.shape(model_inputs['input_ids'] ).numpy()
lowerCamelCase : List[Any] = generate_kwargs.get('min_length' , self.model.config.min_length )
lowerCamelCase : Dict = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(UpperCAmelCase_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
lowerCamelCase : Any = self.model.generate(**UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase : str = output_ids.shape[0]
if self.framework == "pt":
lowerCamelCase : Tuple = output_ids.reshape(UpperCAmelCase_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowerCamelCase : Tuple = tf.reshape(UpperCAmelCase_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=ReturnType.TEXT , UpperCAmelCase_=False ) -> Tuple:
lowerCamelCase : Union[str, Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowerCamelCase : Any = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowerCamelCase : str = {
F"""{self.return_name}_text""": self.tokenizer.decode(
UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , )
}
records.append(UpperCAmelCase_ )
return records
@add_end_docstrings(__UpperCAmelCase )
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'summary'
def __call__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Any:
return super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> bool:
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(__UpperCAmelCase )
class TranslationPipeline(__UpperCAmelCase):
    return_name = "translation"
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True
    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
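# Hedged usage sketch (added for illustration; the model names below are common
# Hugging Face Hub checkpoints assumed here, not part of the original file):
# from transformers import pipeline
# summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
# print(summarizer("A very long article ...", max_length=60, min_length=10))
# translator = pipeline("translation_en_to_fr", model="t5-small")
# print(translator("How are you?", max_length=40))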
| 205
|
"""simple docstring"""
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Find the largest eigenvalue and corresponding eigenvector of input_matrix."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know the vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is the eigenvector corresponding to the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check that our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 205
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
lowercase__ = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E image-to-image pipeline."""
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating latent representations of a 3D asset from an input image, using Shap-E."""
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer) -> None:
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier-free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into
            # a single batch to avoid doing two forward passes.
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(lowercase__)
    def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier-free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128)
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 151
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase__ = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 151
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).')
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 363
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Processor that wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer."""
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily widen the feature size so the mel-spectrogram labels pad correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
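# Hedged usage sketch (added; the checkpoint name is a common Hugging Face Hub
# id assumed here for illustration only):
# from transformers import SpeechT5Processor
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="Hello, world!", return_tensors="pt")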
| 272
| 0
|
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is divisible by divisor, or 0 if none exists."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor n for which least_divisible_repunit(n) first exceeds the limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
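# Added worked example: A(7) = 6, since the six-digit repunit
# 111111 = 7 * 15873 is the first repunit divisible by 7.
assert least_divisible_repunit(7) == 6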
if __name__ == "__main__":
print(f"{solution() = }")
| 87
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 87
| 1
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Return v/c after validating 1 <= velocity <= c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix for motion along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector (symbolic if event is None)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(f'ct\' = {four_vector[0]}')
print(f'x\' = {four_vector[1]}')
print(f'y\' = {four_vector[2]}')
print(f'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'\n{numerical_vector}')
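    # Added sanity check (not in the original script): at v = 0.5c we have
    # beta = 0.5, so gamma = 1 / sqrt(1 - 0.25) ~ 1.1547.
    assert abs(gamma(0.5 * c) - 1 / sqrt(0.75)) < 1e-9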
| 35
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
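# Hedged usage note (added): dep_version_check("numpy") re-checks the installed
# numpy against the pin recorded in dependency_versions_table.py and raises if
# the constraint is violated.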
| 35
| 1
|
"""simple docstring"""
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ : str = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase_ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Tuple = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('CPU' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('GPU' , font_size=2_4 )
lowerCAmelCase_ : List[str] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : int = Text('Model' , font_size=2_4 )
lowerCAmelCase_ : Any = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : Union[str, Any] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
rect.set_stroke(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
self.add(SCREAMING_SNAKE_CASE_ )
model_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Union[str, Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : Any = Text('Loaded Checkpoint' , font_size=2_4 )
lowerCAmelCase_ : Optional[Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Union[str, Any] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : str = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
target.move_to(SCREAMING_SNAKE_CASE_ )
ckpt_arr.append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : List[Any] = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[int] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : List[str] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : Optional[int] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCAmelCase_ : Optional[int] = Text('Disk' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , Write(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) )
lowerCAmelCase_ : Optional[int] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE_ )
self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase_ : List[str] = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) )
self.play(
FadeOut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) , )
self.wait()
| 224
|
"""simple docstring"""
def longest_distance(graph):
    """Print the vertex count of the longest path in a DAG, via Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
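# Added note: for this sample graph the longest path is 0 -> 2 -> 5 -> 6 -> 7
# (5 vertices), so the call above prints 5.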
| 224
| 1
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute the gamma function for a positive integer or half-integer via recursion."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
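# Added worked example: for integers the recursion unwinds to a factorial,
# e.g. gamma(5) = 4 * 3 * 2 * 1 = 24 = 4!.
assert gamma(5) == 24.0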
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(f'gamma({num}) = {gamma(num)}')
print("""\nEnter 0 to exit...""")
| 80
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"""
            """ Base. Use 768 for Stable Diffusion v2."""
        ),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 80
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 205
|
import os
def solution() -> int:
    """Sum of name scores: the alphabetical value of each name times its 1-based position in the sorted list."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 205
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
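# Hedged usage sketch (added; the checkpoint name is assumed for illustration):
# from PIL import Image
# from transformers import BridgeTowerProcessor
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# encoding = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")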
| 369
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config
def a (self : str , a__ : Any=0 , **a__ : Tuple ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case , __snake_case = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
def a (self : List[Any] , a__ : Dict=0 , **a__ : List[str] ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : int , a__ : Tuple=None , **a__ : List[str] ):
"""simple docstring"""
if scheduler is None:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def a (self : str ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = 50
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
def a (self : int ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
__snake_case = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type='''dpmsolver++''' , solver_order=a__ , solver_type=a__ , )
def a (self : Union[str, Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
__snake_case = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def a (self : Tuple ):
"""simple docstring"""
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type='''learned_range''' )
def a (self : int ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.full_loop()
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.full_loop(use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
        assert sample.dtype == torch.float16
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count the tile totals t <= t_limit that can form between 1 and n_limit
    distinct hollow square laminae (Project Euler 174)."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'{solution() = }')
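# Illustrative sanity check (not part of the original solution): the smallest
# lamina uses 8 tiles -- a 3x3 outer square with a 1x1 hole, 3 * 3 - 1 * 1 == 8
# -- so count[8] is incremented exactly once, by the (outer_width=3,
# hole_width=1) pair in the loop above.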
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
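# Shape note (illustrative, mirroring test_batch_decode above): MGP-STR decodes
# three logit streams per image -- character-level (vocab 38), BPE (GPT-2's
# 50257) and wordpiece (BERT's 30522) -- and batch_decode fuses them into
# generated_text plus per-head predictions and scores.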
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy,
    computed with Zeller's congruence."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
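# Worked example (a sanity check computed by hand, not from the original
# script): for "01-01-2000" the month shifts to m=13 of y=1999, so c=19, k=99,
# t=28, u=4, v=24, x=100, z=156, w=118 and f = 118 % 7 = 6 -> "Saturday",
# which matches the calendar.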
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
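# A minimal sketch of the behaviour this shim preserves (the exact warning text
# comes from `deprecate` above): the old import path still works but warns.
#
#   from diffusers.pipeline_utils import DiffusionPipeline
#   # FutureWarning: Importing `DiffusionPipeline` ... is deprecated. Please
#   # import from diffusers.pipelines.pipeline_utils instead.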
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
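# Lazy-import sketch (illustrative): at runtime the module body above only
# registers names in _import_structure; e.g.
#   from transformers.models.glpn import GLPNForDepthEstimation
# triggers the actual import of modeling_glpn the first time it is accessed.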
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given number of digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
print(F'The first {n} digits of pi is: {pi(n)}')
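# Why ceil(precision / 14): each Chudnovsky series term contributes roughly 14
# new correct digits, so 50-digit output needs only ceil(50 / 14) = 4 loop
# iterations.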
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset example and record its character-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user-profile dict embedded in one of the page's <script> tags."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    """Smoke test against the public GitHub profile."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
__a = input("Enter a string ").strip()
__a = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0: List[str], token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
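# Offset illustration (a sketch, not part of the tokenizer itself): with
# offset=103, ids 0-104 are reserved (pad, eos, <mask_2>, <mask_1>,
# <unk_2>..<unk_102>), so a sentencepiece piece with sp_id 5 surfaces to the
# user as token id 5 + 103 = 108.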
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity of two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
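# Worked example (computed by hand, matching the print above): for "hello" vs
# "world" only the single "l" matches inside the search window, so
# jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4666..., and with no common prefix
# jaro_winkler returns the same 0.4666...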
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
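# Note on sizing (illustrative, matching the header comment's ~3MB figure):
# with d_model=4 and single encoder/decoder layers the transformer weights are
# tiny, so nearly all of the saved size comes from the full-vocab embedding
# table kept from facebook/wmt19-en-de.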
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_lowercase : int ="hf-internal-testing/tiny-random-bert"
_lowercase : List[Any] =os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
_lowercase : Any ="9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Union[str, Any] = cached_file(__lowercase , __lowercase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__lowercase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowercase , __lowercase ) ) )
with open(os.path.join(__lowercase , """refs""" , """main""" ) ) as f:
a__ : Optional[int] = f.read()
self.assertEqual(__lowercase , os.path.join(__lowercase , """snapshots""" , __lowercase , __lowercase ) )
self.assertTrue(os.path.isfile(__lowercase ) )
# File is cached at the same place the second time.
a__ : Tuple = cached_file(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
# Using a specific revision to test the full commit hash.
a__ : Union[str, Any] = cached_file(__lowercase , __lowercase , revision="""9b8c223""" )
self.assertEqual(__lowercase , os.path.join(__lowercase , """snapshots""" , __lowercase , __lowercase ) )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(__lowercase , """is not a valid model identifier""" ):
a__ : Tuple = cached_file("""tiny-random-bert""" , __lowercase )
with self.assertRaisesRegex(__lowercase , """is not a valid git identifier""" ):
a__ : int = cached_file(__lowercase , __lowercase , revision="""aaaa""" )
with self.assertRaisesRegex(__lowercase , """does not appear to have a file named""" ):
a__ : Dict = cached_file(__lowercase , """conf""" )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(__lowercase , """does not appear to have a file named""" ):
a__ : Optional[Any] = cached_file(__lowercase , """conf""" )
with open(os.path.join(__lowercase , """refs""" , """main""" ) ) as f:
a__ : int = f.read()
self.assertTrue(os.path.isfile(os.path.join(__lowercase , """.no_exist""" , __lowercase , """conf""" ) ) )
a__ : Optional[Any] = cached_file(__lowercase , """conf""" , _raise_exceptions_for_missing_entries=__lowercase )
self.assertIsNone(__lowercase )
a__ : Optional[Any] = cached_file(__lowercase , """conf""" , local_files_only=__lowercase , _raise_exceptions_for_missing_entries=__lowercase )
self.assertIsNone(__lowercase )
a__ : Optional[Any] = mock.Mock()
a__ : str = 5_0_0
a__ : Optional[int] = {}
a__ : Union[str, Any] = HTTPError
a__ : Tuple = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__lowercase ) as mock_head:
a__ : Union[str, Any] = cached_file(__lowercase , """conf""" , _raise_exceptions_for_connection_errors=__lowercase )
self.assertIsNone(__lowercase )
        # This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __lowercase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __lowercase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , __lowercase ) )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__lowercase , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , __lowercase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__lowercase , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , __lowercase , revision="""ahaha""" )
a__ : Optional[int] = get_file_from_repo("""bert-base-cased""" , __lowercase )
# The name is the cached name which is not very easy to test, so instead we load the content.
a__ : List[str] = json.loads(open(__lowercase , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 7_6_8 )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a__ : List[Any] = Path(__lowercase ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(__lowercase , """a.txt""" ) , str(__lowercase ) )
self.assertIsNone(get_file_from_repo(__lowercase , """b.txt""" ) )
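# A minimal sketch of the utility exercised above (real `transformers.utils` API;
# the repo id is the test fixture defined at the top of this file):
# path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
# os.path.isfile(path)  # True -- downloaded into TRANSFORMERS_CACHE on first call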
def lowerCAmelCase_ ( _lowercase : int) -> int:
    """simple docstring"""
    if not isinstance(_lowercase , int):
        raise TypeError("""only integers accepted as input""")
    else:
        # Largest number obtainable by deleting exactly one digit,
        # e.g. 123 -> candidates [23, 13, 12] -> 23
        num_string = str(abs(_lowercase))
        num_transpositions = [list(num_string) for _ in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int("""""".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : List[str]
_UpperCAmelCase : Optional[str] = None
# Automatically constructed
_UpperCAmelCase : ClassVar[str] = "dict"
_UpperCAmelCase : ClassVar[Any] = None
_UpperCAmelCase : str = field(default="Translation" ,init=lowerCAmelCase_ ,repr=lowerCAmelCase_ )
def __call__( self : List[Any] ) ->Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __lowerCamelCase ( self : Optional[int] ) ->Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : Optional[List] = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[str] = None
# Automatically constructed
_UpperCAmelCase : ClassVar[str] = "dict"
_UpperCAmelCase : ClassVar[Any] = None
_UpperCAmelCase : str = field(default="TranslationVariableLanguages" ,init=lowerCAmelCase_ ,repr=lowerCAmelCase_ )
def __lowerCamelCase ( self : List[Any] ) ->List[str]:
lowerCamelCase__ : List[Any] = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase__ : List[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) ->Optional[int]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def __lowerCamelCase ( self : List[Any] , A : Any ) ->Tuple:
lowerCamelCase__ : Optional[int] = set(self.languages )
if self.languages and set(A ) - lang_set:
raise ValueError(
F"Some languages in example ({', '.join(sorted(set(A ) - lang_set ) )}) are not in valid set ({', '.join(A )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase__ : str = []
for lang, text in translation_dict.items():
if isinstance(A , A ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase__ , lowerCamelCase__ : List[Any] = zip(*sorted(A ) )
return {"language": languages, "translation": translations}
def __lowerCamelCase ( self : List[str] ) ->Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
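# A worked example of the variable-language encoding above (a trace of the
# logic, not an API call -- the method names here are obfuscated):
#   input:  {"en": "the cat", "fr": ["le chat", "la chatte"]}
#   tuples: [("en", "the cat"), ("fr", "le chat"), ("fr", "la chatte")]
#   sorted and unzipped -> {"language": ("en", "fr", "fr"),
#                           "translation": ("the cat", "la chatte", "le chat")}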
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , A : int = 6 ) ->None:
lowerCamelCase__ : Node | None = None
lowerCamelCase__ : Node | None = None
self.create_linked_list(A )
def __lowerCamelCase ( self : Optional[int] , A : int ) ->None:
lowerCamelCase__ : Optional[int] = Node()
lowerCamelCase__ : List[str] = current_node
lowerCamelCase__ : Union[str, Any] = current_node
lowerCamelCase__ : List[str] = current_node
for _ in range(1 , A ):
lowerCamelCase__ : List[str] = Node()
lowerCamelCase__ : List[Any] = current_node
lowerCamelCase__ : Optional[Any] = previous_node
lowerCamelCase__ : Dict = current_node
lowerCamelCase__ : Union[str, Any] = self.front
lowerCamelCase__ : int = previous_node
def __lowerCamelCase ( self : Optional[int] ) ->bool:
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __lowerCamelCase ( self : Optional[int] ) ->Any | None:
self.check_can_perform_operation()
return self.front.data if self.front else None
def __lowerCamelCase ( self : Optional[int] , A : Any ) ->None:
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowerCamelCase__ : List[str] = self.rear.next
if self.rear:
lowerCamelCase__ : Optional[Any] = data
def __lowerCamelCase ( self : str ) ->Any:
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowerCamelCase__ : List[Any] = self.front.data
lowerCamelCase__ : Optional[Any] = None
return data
lowerCamelCase__ : Optional[int] = self.front
lowerCamelCase__ : Optional[int] = old_front.next
lowerCamelCase__ : Any = old_front.data
lowerCamelCase__ : List[str] = None
return data
def __lowerCamelCase ( self : Dict ) ->None:
if self.is_empty():
raise Exception('''Empty Queue''' )
def __lowerCamelCase ( self : int ) ->None:
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ) ->None:
lowerCamelCase__ : Any | None = None
lowerCamelCase__ : Node | None = None
lowerCamelCase__ : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
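# A usage sketch (class and method names above are obfuscated; enqueue/dequeue
# are the conventional names implied by the "Full Queue"/"Empty Queue" errors):
# q = CircularQueueLinkedList(3)   # hypothetical class name
# q.enqueue("a"); q.enqueue("b")
# q.dequeue()  # -> "a"; nodes are recycled rather than allocated per element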
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
__A ={
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
__A ={
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
@lru_cache()
def _UpperCamelCase ( ):
    # Map every byte 0-255 to a printable unicode character (GPT-2-style BPE alphabet).
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
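# A quick check of the mapping returned above (a sketch): printable ASCII bytes
# map to themselves, while control bytes are shifted into an unused range:
#   table[ord("A")] == "A"; table[0] == chr(256)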
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : str = set()
UpperCAmelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : Optional[Any] = char
return pairs
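# e.g. for the word ("h", "e", "l", "l", "o") the function above returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}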
class _snake_case ( a__ ):
lowerCAmelCase :List[str] = VOCAB_FILES_NAMES
lowerCAmelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase :Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , **_lowerCamelCase , ):
UpperCAmelCase__ : Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else bos_token
UpperCAmelCase__ : int = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else eos_token
UpperCAmelCase__ : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else sep_token
UpperCAmelCase__ : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else cls_token
UpperCAmelCase__ : Optional[Any] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else unk_token
UpperCAmelCase__ : List[str] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase__ : Tuple = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else mask_token
super().__init__(
errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , **_lowerCamelCase , )
with open(_lowerCamelCase , encoding="""utf-8""") as vocab_handle:
UpperCAmelCase__ : Tuple = json.load(_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : List[Any] = errors # how to handle errors in decoding
UpperCAmelCase__ : Optional[Any] = bytes_to_unicode()
UpperCAmelCase__ : int = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCamelCase , encoding="""utf-8""") as merges_handle:
UpperCAmelCase__ : List[Any] = merges_handle.read().split("""\n""")[1:-1]
UpperCAmelCase__ : Tuple = [tuple(merge.split()) for merge in bpe_merges]
UpperCAmelCase__ : Dict = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase))))
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
def snake_case__ ( self):
return len(self.encoder)
def snake_case__ ( self):
return dict(self.encoder , **self.added_tokens_encoder)
def snake_case__ ( self , _lowerCamelCase):
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Any = tuple(_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = get_pairs(_lowerCamelCase)
if not pairs:
return token
while True:
UpperCAmelCase__ : Dict = min(_lowerCamelCase , key=lambda _lowerCamelCase: self.bpe_ranks.get(_lowerCamelCase , float("""inf""")))
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = bigram
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : List[str] = 0
while i < len(_lowerCamelCase):
try:
UpperCAmelCase__ : Optional[int] = word.index(_lowerCamelCase , _lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCAmelCase__ : Dict = j
if word[i] == first and i < len(_lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCAmelCase__ : List[Any] = tuple(_lowerCamelCase)
UpperCAmelCase__ : Tuple = new_word
if len(_lowerCamelCase) == 1:
break
else:
UpperCAmelCase__ : Any = get_pairs(_lowerCamelCase)
UpperCAmelCase__ : List[Any] = """ """.join(_lowerCamelCase)
UpperCAmelCase__ : List[str] = word
return word
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : Any = []
for token in re.findall(self.pat , _lowerCamelCase):
UpperCAmelCase__ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase).split(""" """))
return bpe_tokens
def snake_case__ ( self , _lowerCamelCase):
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token))
def snake_case__ ( self , _lowerCamelCase):
return self.decoder.get(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = """""".join(_lowerCamelCase)
UpperCAmelCase__ : Tuple = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors)
return text
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
if not os.path.isdir(_lowerCamelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
UpperCAmelCase__ : str = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
UpperCAmelCase__ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(_lowerCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase) + """\n""")
UpperCAmelCase__ : Optional[int] = 0
with open(_lowerCamelCase , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""")
UpperCAmelCase__ : Tuple = token_index
writer.write(""" """.join(_lowerCamelCase) + """\n""")
index += 1
return vocab_file, merge_file
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : List[str] = [self.cls_token_id]
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase)
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase)) + [1]
return [1] + ([0] * len(_lowerCamelCase)) + [1, 1] + ([0] * len(_lowerCamelCase)) + [1]
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
UpperCAmelCase__ : str = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase):
UpperCAmelCase__ : Optional[int] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase) > 0 and not text[0].isspace()):
UpperCAmelCase__ : Union[str, Any] = """ """ + text
return (text, kwargs)
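# A minimal usage sketch (upstream exposes this class as `BartTokenizer`; the
# class name above is obfuscated):
# tok = BartTokenizer.from_pretrained("facebook/bart-base")
# tok("Hello world")      # single sequence: <s> tokens </s>
# tok("Hello", "world")   # pair: <s> A </s></s> B </s>, per the special-token logic above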
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False ):
UpperCAmelCase__ : str = """backbone.""" if is_semantic else """"""
UpperCAmelCase__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False ):
for i in range(config.num_hidden_layers ):
UpperCAmelCase__ : Optional[Any] = """backbone.""" if is_semantic else """"""
# queries, keys and values
UpperCAmelCase__ : Any = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase__ : List[str] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
UpperCAmelCase__ : int = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase__ : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase__ : Any = q_bias
UpperCAmelCase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ : Any = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase__ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase__ : Dict = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
UpperCAmelCase__ : Dict = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
UpperCAmelCase__ : Union[str, Any] = gamma_a
UpperCAmelCase__ : str = gamma_a
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : int = dct.pop(UpperCamelCase__ )
UpperCAmelCase__ : Optional[Any] = val
def _UpperCamelCase ( ):
UpperCAmelCase__ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
UpperCAmelCase__ : Optional[Any] = False if """rvlcdip""" in checkpoint_url else True
UpperCAmelCase__ : Any = BeitConfig(use_absolute_position_embeddings=UpperCamelCase__ , use_mask_token=UpperCamelCase__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase__ : Optional[Any] = 1_0_2_4
UpperCAmelCase__ : Dict = 4_0_9_6
UpperCAmelCase__ : Any = 2_4
UpperCAmelCase__ : Tuple = 1_6
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase__ : int = 1_6
UpperCAmelCase__ : List[str] = """huggingface/label-files"""
UpperCAmelCase__ : Optional[Any] = """rvlcdip-id2label.json"""
UpperCAmelCase__ : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase__ : Union[str, Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Optional[Any] = idalabel
UpperCAmelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )["""model"""]
UpperCAmelCase__ : List[str] = create_rename_keys(UpperCamelCase__ , has_lm_head=UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , has_lm_head=UpperCamelCase__ )
# load HuggingFace model
UpperCAmelCase__ : str = BeitForMaskedImageModeling(UpperCamelCase__ ) if has_lm_head else BeitForImageClassification(UpperCamelCase__ )
model.eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
UpperCAmelCase__ : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ )
UpperCAmelCase__ : List[str] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
UpperCAmelCase__ : Optional[Any] = encoding["""pixel_values"""]
UpperCAmelCase__ : Optional[Any] = model(UpperCamelCase__ )
UpperCAmelCase__ : int = outputs.logits
# verify logits
UpperCAmelCase__ : int = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(UpperCamelCase__ ), "Shape of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
if has_lm_head:
UpperCAmelCase__ : Union[str, Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
UpperCAmelCase__ : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=UpperCamelCase__ , )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
__A =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
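# Example invocation (a sketch; the script filename is illustrative, the URL is
# the default declared above):
#   python convert_dit_to_pytorch.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base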
import logging
import os
from .state import PartialState
class __A ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def __lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowerCamelCase__ = kwargs.pop('''main_process_only''' , UpperCamelCase_ )
lowerCamelCase__ = kwargs.pop('''in_order''' , UpperCamelCase_ )
if self.isEnabledFor(UpperCamelCase_ ):
if self._should_log(UpperCamelCase_ ):
lowerCamelCase__ , lowerCamelCase__ = self.process(UpperCamelCase_ , UpperCamelCase_ )
self.logger.log(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
elif in_order:
lowerCamelCase__ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowerCamelCase__ , lowerCamelCase__ = self.process(UpperCamelCase_ , UpperCamelCase_ )
self.logger.log(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
state.wait_for_everyone()
def lowerCAmelCase__(__snake_case ,__snake_case = None ) -> Union[str, Any]:
'''simple docstring'''
if log_level is None:
lowerCamelCase__ = os.environ.get('''ACCELERATE_LOG_LEVEL''' ,__snake_case )
lowerCamelCase__ = logging.getLogger(__snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__snake_case ,{} )
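# A minimal usage sketch (upstream exposes this factory as
# `accelerate.logging.get_logger`; the names above are obfuscated):
# from accelerate import Accelerator
# from accelerate.logging import get_logger
# accelerator = Accelerator()  # initializes PartialState, as required above
# logger = get_logger(__name__, log_level="INFO")
# logger.info("logged once", main_process_only=True)
# logger.debug("logged by every rank, in order", main_process_only=False, in_order=True)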
from __future__ import annotations
def two_pointer ( nums , target ):
    """simple docstring"""
    # Classic two-pointer scan over a sorted array: move the pointers inward
    # until a pair sums to the target; return the pair of indices, or [].
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , split=lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , streaming=lowerCamelCase__ , num_proc=lowerCamelCase__ , **lowerCamelCase__ , )
__lowerCamelCase = path_or_paths if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else {self.split: path_or_paths}
__lowerCamelCase = Text(
cache_dir=lowerCamelCase__ , data_files=lowerCamelCase__ , features=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
# Build iterable dataset
if self.streaming:
__lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , num_proc=self.num_proc , )
__lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
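# This reader backs the packaged "text" builder; the usual entry point (sketch):
# from datasets import load_dataset
# ds = load_dataset("text", data_files={"train": "train.txt"})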
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
    # Minimum distance from each unexplored vertex to the partially built tree
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
    for vertex in range(len(UpperCamelCase__ ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
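# Worked example (a sketch): for 3 vertices and the weighted edges
# "0 1 1", "0 2 3", "1 2 1" entered above, the minimum spanning tree
# returned is [(0, 1), (1, 2)] with total weight 2.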
'''simple docstring'''
import re
import string
import numpy as np
import datasets
lowercase__ : List[Any] = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
lowercase__ : List[Any] = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
                                    comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
lowercase__ : Any = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
                __A : Optional[int] = np.array([re.sub(s , '' , x) for x in predictions])
                __A : Dict = np.array([re.sub(s , '' , x) for x in references])
else:
__A : int = np.asarray(_UpperCAmelCase)
__A : Optional[int] = np.asarray(_UpperCAmelCase)
if ignore_case:
__A : Any = np.char.lower(_UpperCAmelCase)
__A : Any = np.char.lower(_UpperCAmelCase)
if ignore_punctuation:
__A : Dict = string.punctuation.maketrans('' , '' , string.punctuation)
__A : List[Any] = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase)
__A : Any = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase)
if ignore_numbers:
__A : Optional[int] = string.digits.maketrans('' , '' , string.digits)
__A : List[str] = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase)
__A : Union[str, Any] = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase)
__A : Dict = predictions == references
return {"exact_match": np.mean(_UpperCAmelCase) * 100}
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
pass
def _lowerCAmelCase ( __snake_case : Image ) -> str:
    # Short, stable fingerprint of an image's raw bytes (md5, first 10 hex chars).
    m = hashlib.md5(__snake_case.tobytes() )
    return m.hexdigest()[:10]
def _lowerCAmelCase ( __snake_case : Image ) -> Dict:
    npimg = np.array(__snake_case )
    shape = npimg.shape
    return {"hash": hashimage(__snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = MaskGenerationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = pipeline('mask-generation' , model='facebook/sam-vit-huge')
__A : Tuple = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256)
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = 'facebook/sam-vit-huge'
__A : Optional[Any] = pipeline('mask-generation' , model=_UpperCAmelCase)
__A : int = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256)
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
] , )
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = 'https://openaipublic.azureedge.net/jukebox/models/'
__lowerCAmelCase = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
_snake_case = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
_snake_case = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
_snake_case = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
_snake_case = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
_snake_case = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
_snake_case = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_snake_case = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
_snake_case = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = {}
import re
_snake_case = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
_snake_case = re.compile(
R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_snake_case = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
_snake_case = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
_snake_case = re.compile(
R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_snake_case = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
_snake_case = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
_snake_case = re.compile(
R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_snake_case = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = int(groups[2] ) * 2 + int(groups[3] )
_snake_case = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_snake_case = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = int(groups[2] ) * 2 + int(groups[3] )
_snake_case = {'''1''': 1, '''3''': 2}[groups[-2]]
_snake_case = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_snake_case = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_snake_case = prefix + resnet_block
_snake_case = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_snake_case = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = int(groups[2] ) * 2 + int(groups[3] ) - 2
_snake_case = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_snake_case = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = int(groups[2] ) * 2 + int(groups[3] ) - 2
_snake_case = {'''1''': 1, '''3''': 2}[groups[-2]]
_snake_case = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_snake_case = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_snake_case = prefix + resnet_block
_snake_case = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_snake_case = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = int(groups[1] ) * 2 + int(groups[2] ) - 2
_snake_case = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_snake_case = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = int(groups[1] ) * 2 + int(groups[2] ) - 2
_snake_case = {'''1''': 1, '''3''': 2}[groups[-2]]
_snake_case = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_snake_case = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_snake_case = prefix + resnet_block
_snake_case = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
_snake_case = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
_snake_case = regex_match.groups()
_snake_case = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_snake_case = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
else:
_snake_case = original_key
_snake_case = replace_key(_SCREAMING_SNAKE_CASE )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            _snake_case = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
_snake_case = original_key
_snake_case = original_key
_snake_case = value
return new_dict
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
_snake_case = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_SCREAMING_SNAKE_CASE )
open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , """wb""" ).write(r.content )
_snake_case = MODEL_MAPPING[model_name.split("""/""" )[-1]]
_snake_case = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = JukeboxModel(_SCREAMING_SNAKE_CASE )
_snake_case = []
_snake_case = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
_snake_case = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['''model''']
_snake_case = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
_snake_case = old_dic[k]
elif k.endswith(""".w""" ):
_snake_case = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_snake_case = old_dic[k]
else:
_snake_case = old_dic[k]
_snake_case = '''vqvae''' if i == 0 else f"""priors.{3 - i}"""
_snake_case = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
_snake_case = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , """w""" ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
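# Example invocation (a sketch; the script filename is illustrative, model names
# come from MODEL_MAPPING above):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#     --pytorch_dump_folder_path jukebox-1b-lyrics-converted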
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
if dst_width < 0 or dst_height < 0:
raise ValueError("""Destination width/height should be > 0""" )
_snake_case = img
_snake_case = img.shape[1]
_snake_case = img.shape[0]
_snake_case = dst_width
_snake_case = dst_height
_snake_case = self.src_w / self.dst_w
_snake_case = self.src_h / self.dst_h
_snake_case = _snake_case = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
)
def lowercase (self ) -> List[Any]:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
_snake_case = self.img[self.get_y(UpperCAmelCase )][self.get_x(UpperCAmelCase )]
def lowercase (self , UpperCAmelCase ) -> int:
return int(self.ratio_x * x )
def lowercase (self , UpperCAmelCase ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
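# For comparison, the per-pixel loops above collapse into NumPy fancy indexing.
# This vectorised version is a sketch (not part of the class above) computing
# the same nearest-neighbour mapping in one shot:
def nearest_neighbour_resize(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    # Map each destination coordinate back to its nearest source pixel,
    # mirroring int(ratio * coordinate) from get_x/get_y.
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    return img[ys[:, None], xs[None, :]]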
| 270
| 0
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure ):
        # _fields_ is the attribute ctypes.Structure expects
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
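# A small usage sketch (the loop and timing are illustrative only): the cursor
# stays hidden for the duration of the block and is restored even on error.
if __name__ == "__main__":
    import time

    with hide():
        for step in range(3):
            print(f"working... step {step}")
            time.sleep(0.5)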
| 335
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric ):
    def _info(self ) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ),
                } ), )

    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4, ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len )
        }
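# The description above defines GLEU as the minimum of n-gram recall and n-gram
# precision. A minimal sentence-level sketch of that idea (corpus_gleu instead
# aggregates the counts over all sentence pairs before dividing):
from collections import Counter


def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    def ngram_counts(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp, ref = ngram_counts(hypothesis), ngram_counts(reference)
    matching = sum((hyp & ref).values())  # n-grams common to both sides
    precision = matching / max(sum(hyp.values()), 1)
    recall = matching / max(sum(ref.values()), 1)
    return min(precision, recall)


print(round(sentence_gleu_sketch("the cat sat".split(), "the cat sat down".split()), 2))  # 0.6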
| 266
| 0
|
"""simple docstring"""
def solution(n: int = 1000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 363
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )

    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )

    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': f1} )

        logger.info(
            name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {f1 * 100:.2f}''' , )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F'''CoNLL score: {conll:.2f}''' )
        output_scores.update({"""conll_score""": conll} )

    return output_scores
def check_gold_parse_annotation(key_lines ) -> bool:
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#""" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute(self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        all_metrics = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=all_metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )

        return score
| 341
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__(self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )

        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )

        return encoding_image_processor
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
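# Hypothetical usage sketch for the processor above, pairing an image with a
# caption prompt (the checkpoint name and URL are examples, not taken from this file):
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, text="a photography of", return_tensors="pt")
    print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values']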
| 283
|
def solution(n: int = 4000000 ) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
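# The list-building solution above stores every Fibonacci number up to n. A
# constant-space sketch of the same sum, using the fact that every third
# Fibonacci number is even, so the even terms satisfy E(k) = 4*E(k-1) + E(k-2):
def solution_constant_space(n: int = 4_000_000) -> int:
    even_a, even_b = 2, 8  # the first two even Fibonacci numbers
    total = 2 if n >= 2 else 0
    while even_b <= n:
        total += even_b
        even_a, even_b = even_b, 4 * even_b + even_a
    return total


if __name__ == "__main__":
    assert solution() == solution_constant_space()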
| 283
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase ):
    def analyze_directory(self , directory: Path , identifier: Union[str, None] = None , ignore_files: Union[List[str], None] = None , n_identifier: Union[str, List[str], None] = None , only_modules: bool = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("""Testing""" , file )

            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(Path("""..""" ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_files(self ):
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization_files(self ):
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )

    def test_configuration_files(self ):
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )

    def test_files_without_standard_identifiers(self ):
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )

    def test_docs_source_files(self ):
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 232
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]


grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )


def find_negative_index(array: list[int] ) -> int:
    left = 0
    right = len(array ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right ) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )


def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    total = 0
    bound = len(grid[0] )

    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total


def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    return len([number for row in grid for number in row if number < 0] )


def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F'''{func}(grid=grid)''' , setup=setup , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
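# Sketch of a fourth strategy (not part of the benchmarks above): since rows and
# columns are both sorted in decreasing order, a single "staircase" walk from
# the top-right corner counts the negatives in O(rows + cols):
def count_negatives_staircase(grid: list[list[int]]) -> int:
    total = 0
    col = len(grid[0]) - 1
    for row in grid:
        while col >= 0 and row[col] < 0:
            col -= 1
        # everything to the right of `col` is negative in this row,
        # and stays negative in every following row
        total += len(row) - 1 - col
    return total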
| 232
| 1
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader ):
    def __init__(self , path_or_paths: NestedDataStructureLike[PathLike] , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )

    def read(self ):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
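# Hypothetical usage sketch ("corpus.txt" is a placeholder path): the reader
# wraps the packaged "text" builder, yielding one dataset row per line of the file.
if __name__ == "__main__":
    dataset = TextDatasetReader("corpus.txt", keep_in_memory=True).read()
    print(dataset)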
| 348
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
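# The _LazyModule machinery above defers the heavy framework imports until an
# attribute is first touched. A simplified sketch of the idea (this is not the
# actual transformers._LazyModule implementation):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value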
| 348
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig ):
    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }
    def __init__(self , vocab_size=30_522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig(OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 138
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig ):
    model_type = '''blenderbot-small'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(self , vocab_size=50_265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2_048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2_048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )

            if self.use_past:
                common_inputs["""decoder_input_ids"""] = {0: """batch"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
            else:
                common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f"""past_key_values.{i}.key"""] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_inputs[f"""past_key_values.{i}.value"""] = {0: """batch""", 2: """past_sequence + sequence"""}
        else:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
                    ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
                ] )

        return common_inputs
    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f"""present.{i}.key"""] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_outputs[f"""present.{i}.value"""] = {0: """batch""", 2: """past_sequence + sequence"""}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            batch , encoder_seq_length = common_inputs["""input_ids"""].shape
            decoder_seq_length = common_inputs["""decoder_input_ids"""].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["""decoder_attention_mask"""] = torch.cat(
                [common_inputs["""decoder_attention_mask"""], torch.ones(batch , decoder_past_length )] , dim=1 )

            common_inputs["""past_key_values"""] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""

            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            batch , seqlen = common_inputs["""input_ids"""].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["""attention_mask"""].dtype
            common_inputs["""attention_mask"""] = torch.cat(
                [common_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs["""past_key_values"""] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self , flattened_output , name , idx , t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
| 138
| 1
|
'''simple docstring'''
def solution(n: int = 4_000_000 ) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 58
|
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 270
| 0
|
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule ):
    def __init__(self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    # implement only because lightning requires to do so
    def forward(self ):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )

    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 355
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict ):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )

    return [s[i:] + s[:i] for i in range(len(s ) )]


def bwt_transform(s: str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )

    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
return response
def reverse_bwt(bwt_string: str , idx_original_string: int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )

    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = '''Provide a string that I will generate its BWT transform: '''
    s = input(entry_msg ).strip()
    result = bwt_transform(s )
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
    original_string = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
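# The reconstruction in reverse_bwt rebuilds every rotation and is quadratic. A
# sketch of the standard faster inverse, using the last-to-first column mapping
# (O(n log n) for the sort, O(n) for the walk), producing the same result:
def reverse_bwt_fast(bwt_string: str, idx_original_string: int) -> str:
    # sorting the indices by character yields the first column of the rotation matrix
    first_column = sorted(range(len(bwt_string)), key=lambda i: bwt_string[i])
    result = []
    row = idx_original_string
    for _ in range(len(bwt_string)):
        row = first_column[row]
        result.append(bwt_string[row])
    return "".join(result)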
| 156
| 0
|
import csv
import tweepy
# Twitter API credentials
a =""""""
a =""""""
a =""""""
a =""""""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
# authorize twitter, initialize tweepy
__lowerCamelCase : Tuple = tweepy.OAuthHandler(lowerCamelCase__ , lowerCamelCase__ )
auth.set_access_token(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Optional[int] = tweepy.API(lowerCamelCase__ )
# initialize a list to hold all the tweepy Tweets
__lowerCamelCase : str = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowerCamelCase : Union[str, Any] = api.user_timeline(screen_name=lowerCamelCase__ , count=2_0_0 )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# save the id of the oldest tweet less one
__lowerCamelCase : Any = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCamelCase__ ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
__lowerCamelCase : str = api.user_timeline(
screen_name=lowerCamelCase__ , count=2_0_0 , max_id=lowerCamelCase__ )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# update the id of the oldest tweet less one
__lowerCamelCase : Optional[int] = alltweets[-1].id - 1
print(F"...{len(lowerCamelCase__ )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowerCamelCase : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
__lowerCamelCase : Any = csv.writer(lowerCamelCase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCamelCase__ )
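# ----------------------------------------------------------------------
# Alternative pagination sketch (illustrative; this helper is not part of
# the original script): tweepy.Cursor tracks max_id internally, so the
# manual "oldest tweet id minus one" bookkeeping above is not needed.
def get_all_tweets_with_cursor(screen_name, key, secret, token, token_secret):
    auth = tweepy.OAuthHandler(key, secret)
    auth.set_access_token(token, token_secret)
    api = tweepy.API(auth, wait_on_rate_limit=True)
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=2_0_0).items()
    ]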
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 73
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__a :List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a :Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
__a :Optional[int] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
__a :str = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = ElectraTokenizer
def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=True , UpperCAmelCase : Any="[UNK]" , UpperCAmelCase : Union[str, Any]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
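# Usage sketch (requires network access to the Hugging Face Hub; the
# public export of the fast tokenizer above is ElectraTokenizerFast):
#     from transformers import ElectraTokenizerFast
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     print(tokenizer("ELECTRA uses a WordPiece vocabulary.")["input_ids"])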
| 312
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :List[str] = pd.read_csv('sample_data.csv', header=None)
    SCREAMING_SNAKE_CASE :List[str] = df.shape[0]  # number of rows in the dataset
    # If you're using another dataset, select its target column here
SCREAMING_SNAKE_CASE :str = df.iloc[:, 1:2]
SCREAMING_SNAKE_CASE :Tuple = actual_data.values.reshape(len_data, 1)
SCREAMING_SNAKE_CASE :Optional[int] = MinMaxScaler().fit_transform(actual_data)
SCREAMING_SNAKE_CASE :Tuple = 10
SCREAMING_SNAKE_CASE :List[Any] = 5
SCREAMING_SNAKE_CASE :Optional[Any] = 20
SCREAMING_SNAKE_CASE :Optional[int] = len_data - periods * look_back
SCREAMING_SNAKE_CASE :Optional[int] = actual_data[:division]
SCREAMING_SNAKE_CASE :Tuple = actual_data[division - look_back :]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :int = [], []
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Dict = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
SCREAMING_SNAKE_CASE :Dict = np.array(train_x)
SCREAMING_SNAKE_CASE :Dict = np.array(test_x)
SCREAMING_SNAKE_CASE :int = np.array([list(i.ravel()) for i in train_y])
SCREAMING_SNAKE_CASE :Any = np.array([list(i.ravel()) for i in test_y])
SCREAMING_SNAKE_CASE :List[str] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input shape is inferred from the previous layer
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
SCREAMING_SNAKE_CASE :List[Any] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
SCREAMING_SNAKE_CASE :List[str] = model.predict(x_test)
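    # Note (illustrative follow-up): MinMaxScaler above is used via
    # fit_transform and then discarded, so `result` stays in the scaled
    # [0, 1] range. Keeping the fitted scaler lets you map predictions
    # back to the original units, e.g.:
    #     scaler = MinMaxScaler().fit(raw_values)
    #     scaled = scaler.transform(raw_values)
    #     ...train on `scaled` as above...
    #     result_original = scaler.inverse_transform(result.reshape(-1, 1)).reshape(result.shape)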
| 353
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,*A : Dict ,**A : Dict ):
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." ,A ,)
super().__init__(*A ,**A )
| 124
| 0
|
from __future__ import annotations
import math
lowercase : Any = '2020.9.26'
lowercase : Union[str, Any] = 'xcodz-dot, cclaus, dhruvmanila'
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float) -> tuple[float, float]:
'''simple docstring'''
if not all(isinstance(_lowerCamelCase , (float, int)) for val in locals().values()):
__UpperCamelCase : str = F'Input values must either be float or int: {list(locals().values())}'
raise TypeError(_lowerCamelCase)
__UpperCamelCase : List[str] = ((x * distance) / (z + distance)) * scale
__UpperCamelCase : List[Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : str , _lowerCamelCase : float) -> tuple[float, float, float]:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase):
raise TypeError("Axis must be a str")
__UpperCamelCase : str = locals()
del input_variables["axis"]
if not all(isinstance(_lowerCamelCase , (float, int)) for val in input_variables.values()):
__UpperCamelCase : Dict = (
"Input values except axis must either be float or int: "
F'{list(input_variables.values())}'
)
raise TypeError(_lowerCamelCase)
    __UpperCamelCase : Optional[Any] = math.radians(angle % 360)  # convert degrees to radians
if axis == "z":
__UpperCamelCase : Tuple = x * math.cos(_lowerCamelCase) - y * math.sin(_lowerCamelCase)
__UpperCamelCase : Union[str, Any] = y * math.cos(_lowerCamelCase) + x * math.sin(_lowerCamelCase)
__UpperCamelCase : Any = z
elif axis == "x":
__UpperCamelCase : Dict = y * math.cos(_lowerCamelCase) - z * math.sin(_lowerCamelCase)
__UpperCamelCase : Any = z * math.cos(_lowerCamelCase) + y * math.sin(_lowerCamelCase)
__UpperCamelCase : List[str] = x
elif axis == "y":
__UpperCamelCase : Any = x * math.cos(_lowerCamelCase) - z * math.sin(_lowerCamelCase)
__UpperCamelCase : Any = z * math.cos(_lowerCamelCase) + x * math.sin(_lowerCamelCase)
__UpperCamelCase : Dict = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
return new_x, new_y, new_z
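# ----------------------------------------------------------------------
# Self-contained check of the z-axis case above (a sketch that inlines
# the formula instead of calling the function in this file): rotating
# the point (1, 0, 0) by 90 degrees about the z-axis should give
# (0, 1, 0) up to floating-point error.
def _rotation_demo() -> None:
    angle = math.radians(90.0)
    x, y = 1.0, 0.0
    new_x = x * math.cos(angle) - y * math.sin(angle)
    new_y = y * math.cos(angle) + x * math.sin(angle)
    assert abs(new_x) < 1e-9 and abs(new_y - 1.0) < 1e-9

_rotation_demo()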
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }")
| 232
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase : Optional[Any] = TypeVar('T')
class lowerCamelCase__ ( Generic[T]):
'''simple docstring'''
_A = 42 # Cache store of keys
_A = 42 # References of the keys in cache
_A = 1_0 # Maximum capacity of cache
def __init__( self :Optional[Any] , a :int ) -> None:
__UpperCamelCase : Union[str, Any] = deque()
__UpperCamelCase : str = set()
if not n:
__UpperCamelCase : Union[str, Any] = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than or equal to 0." )
else:
__UpperCamelCase : Any = n
def _lowerCamelCase ( self :Tuple , a :T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
__UpperCamelCase : int = self.dq_store.pop()
self.key_reference.remove(a )
else:
self.dq_store.remove(a )
self.dq_store.appendleft(a )
self.key_reference.add(a )
def _lowerCamelCase ( self :Any ) -> None:
for k in self.dq_store:
print(a )
def __repr__( self :Tuple ) -> str:
return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
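# ----------------------------------------------------------------------
# Standard-library equivalent (a sketch, separate from the class above):
# collections.OrderedDict gives the same recency behaviour through
# move_to_end / popitem(last=False), reproducing the demo result.
from collections import OrderedDict

class _MiniLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store = OrderedDict()

    def refer(self, key: str | int) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # most recently used moves to the end
        elif len(self.store) >= self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[key] = None

_mini = _MiniLRU(4)
for _key in ("A", 2, 3, "A", 4, 5):
    _mini.refer(_key)
assert list(reversed(_mini.store)) == [5, 4, "A", 3]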
| 232
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "▁"
UpperCAmelCase_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
UpperCAmelCase_ = {
"google/pegasus-xsum": 512,
}
class UpperCamelCase_ ( _UpperCAmelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PegasusTokenizer
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="<pad>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<mask_2>" , lowerCAmelCase_="<mask_1>" , lowerCAmelCase_=None , lowerCAmelCase_=103 , **lowerCAmelCase_ , ) -> Union[str, Any]:
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowercase_ )}, but is'''
F''' {type(lowercase_ )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> Tuple:
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> int:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> int:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 371
|
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
_snake_case = [0 for i in range(r + 1 )]
# nc0 = 1
_snake_case = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
_snake_case = min(UpperCamelCase__ , UpperCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
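# Worked check (self-contained): the Pascal's-rule accumulation above
# computes C(10, 5) = 10! / (5! * 5!) = 252.
import math

assert math.comb(10, 5) == 252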
| 295
| 0
|
def SCREAMING_SNAKE_CASE__ ( string_a, string_b ) -> int:
    '''simple docstring'''
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a, string_b ):
        if char_a != char_b:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
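# Worked example (self-contained): "karolin" and "kathrin" differ at
# positions 2, 3 and 4, so their Hamming distance is 3.
assert sum(char_a != char_b for char_a, char_b in zip("karolin", "kathrin")) == 3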
| 138
|
__A : dict[tuple[int, int, int], int] = {}
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowerCAmelCase : Dict = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowerCAmelCase : int = _calculate(days - 1, _UpperCAmelCase, late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowerCAmelCase : List[Any] = _calculate(days - 1, absent + 1, 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowerCAmelCase : Optional[Any] = _calculate(days - 1, _UpperCAmelCase, 0 )
lowerCAmelCase : int = state_late + state_absent + state_ontime
lowerCAmelCase : Any = prizestrings
return prizestrings
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 30 ) -> int:
'''simple docstring'''
return _calculate(_UpperCAmelCase, absent=0, late=0 )
if __name__ == "__main__":
print(solution())
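# Sanity check from the Project Euler 191 statement (self-contained brute
# force over explicit strings): a 4-day period admits exactly 43 prize
# strings, i.e. strings over {O, L, A} with fewer than two absences and
# no three consecutive lates.
from itertools import product

assert sum(
    s.count("A") < 2 and "LLL" not in s
    for s in ("".join(p) for p in product("OLA", repeat=4))
) == 43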
| 138
| 1
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
_SCREAMING_SNAKE_CASE = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_SCREAMING_SNAKE_CASE = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_SCREAMING_SNAKE_CASE = sorted(arg_to_scheduler.keys())
_SCREAMING_SNAKE_CASE = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class SCREAMING_SNAKE_CASE_ ( pl.LightningModule ):
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int=None , lowerCamelCase_ : Optional[Any]="base" , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : int , ):
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = 0
UpperCamelCase = Path(self.hparams.output_dir )
UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
UpperCamelCase = config
UpperCamelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert hasattr(self.config , SCREAMING_SNAKE_CASE_ ), f"""model config doesn\'t have a `{p}` attribute"""
setattr(self.config , SCREAMING_SNAKE_CASE_ , getattr(self.hparams , SCREAMING_SNAKE_CASE_ ) )
if tokenizer is None:
UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=SCREAMING_SNAKE_CASE_ , )
else:
UpperCamelCase = tokenizer
UpperCamelCase = MODEL_MODES[mode]
if model is None:
UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=SCREAMING_SNAKE_CASE_ , )
else:
UpperCamelCase = model
def lowerCamelCase_ ( self : Union[str, Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_type.from_pretrained(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCamelCase = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model
UpperCamelCase = ["""bias""", """LayerNorm.weight"""]
UpperCamelCase = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
UpperCamelCase = Adafactor(
SCREAMING_SNAKE_CASE_ , lr=self.hparams.learning_rate , scale_parameter=SCREAMING_SNAKE_CASE_ , relative_step=SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase = AdamW(
SCREAMING_SNAKE_CASE_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCamelCase = optimizer
UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : str ):
"""simple docstring"""
return self.validation_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
return self.validation_end(SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCamelCase_ ( self : int , lowerCamelCase_ : int ):
"""simple docstring"""
if stage == "test":
UpperCamelCase = len(self.test_dataloader().dataset )
else:
UpperCamelCase = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = len(self.train_dataloader().dataset )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] = False ):
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return self.train_loader
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ):
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
SCREAMING_SNAKE_CASE_ , list(filter(SCREAMING_SNAKE_CASE_ , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = self.output_dir.joinpath("""best_tfmr""" )
UpperCamelCase = self.step_count
self.model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
@staticmethod
def lowerCamelCase_ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=SCREAMING_SNAKE_CASE_ , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(SCREAMING_SNAKE_CASE_ ).parent / """test_run""" / """cache""" ) , type=SCREAMING_SNAKE_CASE_ , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=SCREAMING_SNAKE_CASE_ , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=SCREAMING_SNAKE_CASE_ , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=SCREAMING_SNAKE_CASE_ , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=SCREAMING_SNAKE_CASE_ , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=SCREAMING_SNAKE_CASE_ , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=SCREAMING_SNAKE_CASE_ , metavar=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=SCREAMING_SNAKE_CASE_ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=SCREAMING_SNAKE_CASE_ , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=SCREAMING_SNAKE_CASE_ , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("""--train_batch_size""" , default=32 , type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("""--eval_batch_size""" , default=32 , type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ):
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning releases the old accelerator hooks are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = trainer.lr_schedulers[0]["""scheduler"""]
UpperCamelCase = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
rank_zero_info("""***** Validation results *****""" )
UpperCamelCase = trainer.callback_metrics
# Log results
for key in sorted(SCREAMING_SNAKE_CASE_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(SCREAMING_SNAKE_CASE_ , str(metrics[key] ) ) )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any ):
"""simple docstring"""
rank_zero_info("""***** Test results *****""" )
UpperCamelCase = trainer.callback_metrics
# Log and save results to file
UpperCamelCase = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(SCREAMING_SNAKE_CASE_ , """w""" ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(SCREAMING_SNAKE_CASE_ , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(SCREAMING_SNAKE_CASE_ , str(metrics[key] ) ) )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
parser.add_argument(
"""--output_dir""" , default=str(Path(lowerCAmelCase__ ).parent / """test_run""" / """model_checkpoints""" ) , type=lowerCAmelCase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=lowerCAmelCase__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=lowerCAmelCase__ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=lowerCAmelCase__ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=lowerCAmelCase__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=lowerCAmelCase__ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(lowerCAmelCase__ ).parent / """test_run""" / """dummy-train-data""" ) , type=lowerCAmelCase__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[] , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Tuple:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
UpperCamelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase__ )
# add custom checkpoints
if checkpoint_callback is None:
UpperCamelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase__ )
if logging_callback is None:
UpperCamelCase = LoggingCallback()
UpperCamelCase = {}
if args.fpaa:
UpperCamelCase = 16
if args.gpus > 1:
UpperCamelCase = """auto"""
UpperCamelCase = """ddp"""
UpperCamelCase = args.accumulate_grad_batches
UpperCamelCase = None
UpperCamelCase = """auto"""
UpperCamelCase = pl.Trainer.from_argparse_args(
lowerCAmelCase__ , weights_summary=lowerCAmelCase__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase__ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase__ , )
if args.do_train:
trainer.fit(lowerCAmelCase__ )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
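# Subclass sketch (illustrative; in the upstream transformers examples
# this LightningModule is exported as BaseTransformer, and get_dataloader
# is the hook each task must implement):
#     class MyTaskModule(BaseTransformer):
#         def get_dataloader(self, type_path, batch_size, shuffle=False):
#             ...  # build and return a torch DataLoader for `type_path`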
| 366
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_SCREAMING_SNAKE_CASE = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_SCREAMING_SNAKE_CASE = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
_SCREAMING_SNAKE_CASE = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
_SCREAMING_SNAKE_CASE = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
_SCREAMING_SNAKE_CASE = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
_SCREAMING_SNAKE_CASE = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 165
| 0
|
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ) -> str:
__snake_case = WavaVecaForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ )
__snake_case = downstream_dict['''projector.weight''']
__snake_case = downstream_dict['''projector.bias''']
__snake_case = downstream_dict['''model.post_net.linear.weight''']
__snake_case = downstream_dict['''model.post_net.linear.bias''']
return model
def lowerCamelCase__ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Tuple ) -> str:
__snake_case = WavaVecaForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ )
__snake_case = downstream_dict['''model.linear.weight''']
__snake_case = downstream_dict['''model.linear.bias''']
return model
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : str , snake_case_ : List[str] ) -> Dict:
__snake_case = WavaVecaForXVector.from_pretrained(snake_case_ , config=snake_case_ )
__snake_case = downstream_dict['''connector.weight''']
__snake_case = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__snake_case = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
__snake_case = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__snake_case = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : List[str] ) -> Tuple:
__snake_case = torch.load(snake_case_ , map_location='''cpu''' )
__snake_case = checkpoint['''Downstream''']
__snake_case = WavaVecaConfig.from_pretrained(snake_case_ )
__snake_case = WavaVecaFeatureExtractor.from_pretrained(
snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ )
__snake_case = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__snake_case = convert_classification(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__snake_case = convert_diarization(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForXVector''' ):
__snake_case = convert_xvector(snake_case_ , snake_case_ , snake_case_ )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
__snake_case = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
snake_case_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
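    # Example invocation (the script and file names are illustrative
    # placeholders; the flags are the ones defined above):
    #     python convert_s3prl_checkpoint.py \
    #         --base_model_name facebook/wav2vec2-base \
    #         --config_path ./config.json \
    #         --checkpoint_path ./s3prl_checkpoint.pt \
    #         --model_dump_path ./converted_model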
| 24
|
def UpperCAmelCase_ ( __lowerCAmelCase ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
__lowercase : List[str] = 0
while number:
        # Brian Kernighan's trick: `number &= number - 1` clears the lowest
        # set bit, so the loop runs once per set bit rather than once per
        # bit position (popcount(number) iterations instead of 32)
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
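# Worked example (self-contained): 25 is 0b11001, so the loop above runs
# three times (25 -> 24 -> 16 -> 0), one pass per set bit.
assert bin(25).count("1") == 3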
| 156
| 0
|
from maths.prime_check import is_prime
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ):
snake_case_ : Dict = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__a )
if is_prime(__a ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
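# Worked example (self-contained re-check via is_prime): 5 and 7 are both
# prime, so the function above returns 7 for input 5; 4 is not prime, so
# it returns -1.
assert is_prime(5) and is_prime(7) and not is_prime(4)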
| 357
|
import argparse
from collections import defaultdict
import yaml
_SCREAMING_SNAKE_CASE = """docs/source/en/_toctree.yml"""
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : List[Any] = defaultdict(__a )
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(__a )
snake_case_ : Any = new_doc_list
snake_case_ : str = [key for key, value in counts.items() if value > 1]
snake_case_ : Any = []
for duplicate_key in duplicates:
snake_case_ : Any = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
snake_case_ : str = sorted(__a , key=lambda __a : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__a ) > 1:
        raise ValueError("Found two 'overview' docs in the same section, which is not allowed." )
overview_doc.extend(__a )
# Sort
return overview_doc
def SCREAMING_SNAKE_CASE__ ( __a=False ):
with open(__a , encoding='utf-8' ) as f:
snake_case_ : int = yaml.safe_load(f.read() )
# Get to the API doc
snake_case_ : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case_ : Dict = content[api_idx]['sections']
# Then to the model doc
snake_case_ : Tuple = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
snake_case_ : Union[str, Any] = api_doc[scheduler_idx]['sections']
snake_case_ : Optional[Any] = clean_doc_toc(__a )
snake_case_ : int = False
if new_scheduler_doc != scheduler_doc:
snake_case_ : int = True
if overwrite:
snake_case_ : Union[str, Any] = new_scheduler_doc
if diff:
if overwrite:
snake_case_ : Optional[int] = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
                'The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
def SCREAMING_SNAKE_CASE__ ( __a=False ):
with open(__a , encoding='utf-8' ) as f:
snake_case_ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
snake_case_ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case_ : str = content[api_idx]['sections']
# Then to the model doc
snake_case_ : List[Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
snake_case_ : Dict = False
snake_case_ : Union[str, Any] = api_doc[pipeline_idx]['sections']
snake_case_ : Union[str, Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
snake_case_ : Optional[Any] = pipeline_doc['section']
snake_case_ : Optional[int] = clean_doc_toc(__a )
if overwrite:
snake_case_ : Tuple = new_sub_pipeline_doc
new_pipeline_docs.append(__a )
# sort overall pipeline doc
snake_case_ : Optional[Any] = clean_doc_toc(__a )
if new_pipeline_docs != pipeline_docs:
snake_case_ : List[str] = True
if overwrite:
snake_case_ : List[str] = new_pipeline_docs
if diff:
if overwrite:
snake_case_ : List[Any] = api_doc
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
                'The pipeline doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
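    # Typical invocations (the script path is an illustrative placeholder;
    # the flag is the one defined above):
    #     python utils/check_doc_toc.py                      # verify only
    #     python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toctree in place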
| 88
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 50 ) -> int:
_lowerCAmelCase : int = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
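# Sanity check from the Project Euler 114 statement (self-contained brute
# force): encode red blocks as runs of "1" of length >= 3 separated by at
# least one "0"; a row of length 7 admits exactly 17 arrangements.
import re
from itertools import product

_valid_row = re.compile(r"0*(?:1{3,}0+)*(?:1{3,})?0*")
assert sum(bool(_valid_row.fullmatch("".join(p))) for p in product("01", repeat=7)) == 17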
| 44
|
from jiwer import compute_measures
import datasets
lowerCamelCase : str = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
lowerCamelCase : int = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
lowerCamelCase : str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase ( self , A=None , A=None , A=False ) -> List[Any]:
if concatenate_texts:
return compute_measures(A , A )["wer"]
else:
snake_case : Any = 0
snake_case : Any = 0
for prediction, reference in zip(A , A ):
snake_case : Tuple = compute_measures(A , A )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
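# Worked check of the 0.5 in the docstring above: the first pair has one
# substitution over a 4-word reference; the second contributes three
# errors over a 4-word reference (one alignment gives two substitutions
# and one insertion), so WER = (1 + 3) / (4 + 4) = 0.5.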
| 124
| 0
|
import os
from collections.abc import Iterator
def SCREAMING_SNAKE_CASE_ ( __A : str = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(__A ):
a_ : Dict = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__A )[1] in (".py", ".ipynb"):
yield os.path.join(__A , __A ).lstrip('./' )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return F"""{i * " "}*""" if i else "\n##"
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> str:
"""simple docstring"""
a_ : Optional[int] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__A ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(__A )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def SCREAMING_SNAKE_CASE_ ( __A : str = "." ) -> None:
"""simple docstring"""
a_ : Dict = ''
for filepath in sorted(good_file_paths(__A ) ):
a_ , a_ : str = os.path.split(__A )
if filepath != old_path:
a_ : List[Any] = print_path(__A , __A )
a_ : Dict = (filepath.count(os.sep ) + 1) if filepath else 0
a_ : int = F"""{filepath}/{filename}""".replace(' ' , '%20' )
a_ : Optional[Any] = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F"""{md_prefix(__A )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
| 120
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a_ : str = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a_ : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a_ : Dict = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a_ : Dict = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ )
# baseline - just load from_pretrained with normal network
a_ : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
a_ : List[str] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a_ : Any = '1'
a_ : Optional[Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a_ : Tuple = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a_ : str = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a_ : Tuple = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ )
# baseline - just load from_pretrained with normal network
a_ : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
a_ : Dict = self.get_env()
a_ : Dict = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a_ : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
a_ : Dict = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
a_ : Any = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
a_ : Tuple = self.get_env()
a_ : int = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
a_ : str = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a_ : Dict = '1'
a_ : int = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
a_ : Union[str, Any] = '\nfrom transformers import pipeline\n '
a_ : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
a_ : Union[str, Any] = self.get_env()
a_ : Optional[Any] = '1'
a_ : int = [sys.executable, '-c', '\n'.join([load, mock, run] )]
a_ : Union[str, Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : Optional[int] = '\nfrom transformers import AutoModel\n '
a_ : Dict = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
a_ : Tuple = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
a_ : Optional[Any] = self.get_env()
a_ : Dict = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a_ : Optional[int] = '1'
a_ : Optional[Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
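# A minimal, self-contained sketch of the offline-emulation trick the tests above
# rely on: monkey-patch `socket.socket` so that any attempt to open a network
# connection fails fast. The names below are illustrative, not part of the suite.
import socket


def _offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled")


def _offline_demo():
    real_socket = socket.socket
    socket.socket = _offline_socket  # from here on, all network access raises
    try:
        socket.socket()
    except RuntimeError as exc:
        print(f"caught: {exc}")
    finally:
        socket.socket = real_socket  # always restore the real implementation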
| 120
| 1
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = args.log_outputs
A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
A__ = load_metric('''wer''' )
A__ = load_metric('''cer''' )
# compute metrics
A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
A__ = f"""WER: {wer_result}\nCER: {cer_result}"""
print(lowercase_ )
with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowercase_ )
# log all results in a text file - possibly interesting for analysis
if log_outputs is not None:
A__ = f"""log_{dataset_id}_predictions.txt"""
A__ = f"""log_{dataset_id}_targets.txt"""
with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowercase_ , lowercase_ ):
p.write(f"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowercase_ , with_indices=lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
"""simple docstring"""
A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ = re.sub(lowercase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. by removing newline characters
# note that order is important here!
A__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
A__ = ''' '''.join(text.split(lowercase_ ) )
return text
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ )
# for testing: only process the first few examples by uncommenting the line below
# dataset = dataset.select(range(10))
# load processor
A__ = AutoFeatureExtractor.from_pretrained(args.model_id )
A__ = feature_extractor.sampling_rate
# resample audio
A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) )
# load eval pipeline
if args.device is None:
A__ = 0 if torch.cuda.is_available() else -1
A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowercase_ ):
A__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
A__ = prediction['''text''']
A__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names )
# compute and log results
# do not change function below
log_results(lowercase_ , lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCamelCase : str = parser.parse_args()
main(args)
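# The script above delegates WER/CER to `datasets.load_metric`. As a rough,
# dependency-free sanity check, word error rate is just the word-level
# Levenshtein distance divided by the reference length - a minimal sketch
# (illustrative only, not the metric implementation used above):
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between the first i reference words and the
    # first j hypothesis words (substitution/insertion/deletion each cost 1)
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(ref)][len(hyp)] / max(len(ref), 1)


assert word_error_rate("the cat sat", "the cat sat") == 0.0
assert abs(word_error_rate("the cat sat", "the bat sat") - 1 / 3) < 1e-9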
| 14
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
@register_to_config
def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 3 , lowerCAmelCase = 3_2 , lowerCAmelCase = 2_5_6 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = 0.1_82_15 , lowerCAmelCase = "group" , ):
super().__init__()
# pass init params to Encoder
__lowercase= Encoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , )
__lowercase= vq_embed_dim if vq_embed_dim is not None else latent_channels
__lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 )
__lowercase= VectorQuantizer(lowerCAmelCase , lowerCAmelCase , beta=0.25 , remap=lowerCAmelCase , sane_index_shape=lowerCAmelCase )
__lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 )
# pass init params to Decoder
__lowercase= Decoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , norm_type=lowerCAmelCase , )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= self.encoder(lowerCAmelCase )
__lowercase= self.quant_conv(lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ):
# also go through quantization layer
if not force_not_quantize:
__lowercase, __lowercase, __lowercase= self.quantize(lowerCAmelCase )
else:
__lowercase= h
__lowercase= self.post_quant_conv(lowerCAmelCase )
__lowercase= self.decoder(lowerCAmelCase , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= sample
__lowercase= self.encode(lowerCAmelCase ).latents
__lowercase= self.decode(lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
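# The VectorQuantizer wired in above maps each encoder output to its nearest
# codebook entry. A minimal sketch of that lookup with a straight-through
# gradient estimator (illustrative; the real implementation adds a commitment
# loss, optional index remapping, etc.):
import torch


def nearest_code_quantize(z: torch.Tensor, codebook: torch.Tensor):
    # z: (n, d) latents, codebook: (k, d) code vectors
    # squared L2 distances via ||z||^2 - 2 z.c + ||c||^2, shape (n, k)
    distances = z.pow(2).sum(1, keepdim=True) - 2 * z @ codebook.t() + codebook.pow(2).sum(1)
    indices = distances.argmin(dim=1)  # nearest code per latent
    z_q = codebook[indices]            # quantized latents, (n, d)
    # straight-through: the forward pass uses z_q, gradients flow back into z
    return z + (z_q - z).detach(), indices


latents = torch.randn(4, 8, requires_grad=True)
codes = torch.randn(16, 8)
quantized, idx = nearest_code_quantize(latents, codes)
quantized.sum().backward()  # gradients reach `latents` despite the argmin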
| 295
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Any = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
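# The `_LazyModule` used above defers the heavy framework imports until an
# attribute is first touched. The same effect can be sketched with a
# module-level __getattr__ (PEP 562); a hypothetical package __init__.py:
import importlib

_LAZY_ATTRS = {
    "WhisperConfig": "configuration_whisper",
    "WhisperTokenizer": "tokenization_whisper",
}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        # resolve the submodule on first access only
        module = importlib.import_module(f".{_LAZY_ATTRS[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")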
| 287
|
'''simple docstring'''
def a__ ( lowercase : int, lowercase : int, lowercase : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(lowercase : int, lowercase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
_UpperCamelCase = update_area_of_max_square(lowercase, col + 1 )
_UpperCamelCase = update_area_of_max_square(row + 1, col + 1 )
_UpperCamelCase = update_area_of_max_square(row + 1, lowercase )
if mat[row][col]:
_UpperCamelCase = 1 + min([right, diagonal, down] )
_UpperCamelCase = max(largest_square_area[0], lowercase )
return sub_problem_sol
else:
return 0
_UpperCamelCase = [0]
update_area_of_max_square(0, 0 )
return largest_square_area[0]
def a__ ( lowercase : int, lowercase : int, lowercase : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
lowercase : int, lowercase : int, lowercase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
_UpperCamelCase = update_area_of_max_square_using_dp_array(lowercase, col + 1, lowercase )
_UpperCamelCase = update_area_of_max_square_using_dp_array(row + 1, col + 1, lowercase )
_UpperCamelCase = update_area_of_max_square_using_dp_array(row + 1, lowercase, lowercase )
if mat[row][col]:
_UpperCamelCase = 1 + min([right, diagonal, down] )
_UpperCamelCase = max(largest_square_area[0], lowercase )
_UpperCamelCase = sub_problem_sol
return sub_problem_sol
else:
return 0
_UpperCamelCase = [0]
_UpperCamelCase = [[-1] * cols for _ in range(lowercase )]
update_area_of_max_square_using_dp_array(0, 0, lowercase )
return largest_square_area[0]
def a__ ( lowercase : int, lowercase : int, lowercase : list[list[int]] ) -> int:
"""simple docstring"""
_UpperCamelCase = [[0] * (cols + 1) for _ in range(rows + 1 )]
_UpperCamelCase = 0
for row in range(rows - 1, -1, -1 ):
for col in range(cols - 1, -1, -1 ):
_UpperCamelCase = dp_array[row][col + 1]
_UpperCamelCase = dp_array[row + 1][col + 1]
_UpperCamelCase = dp_array[row + 1][col]
if mat[row][col] == 1:
_UpperCamelCase = 1 + min(lowercase, lowercase, lowercase )
_UpperCamelCase = max(dp_array[row][col], lowercase )
else:
_UpperCamelCase = 0
return largest_square_area
def a__ ( lowercase : int, lowercase : int, lowercase : list[list[int]] ) -> int:
"""simple docstring"""
_UpperCamelCase = [0] * (cols + 1)
_UpperCamelCase = [0] * (cols + 1)
_UpperCamelCase = 0
for row in range(rows - 1, -1, -1 ):
for col in range(cols - 1, -1, -1 ):
_UpperCamelCase = current_row[col + 1]
_UpperCamelCase = next_row[col + 1]
_UpperCamelCase = next_row[col]
if mat[row][col] == 1:
_UpperCamelCase = 1 + min(lowercase, lowercase, lowercase )
_UpperCamelCase = max(current_row[col], lowercase )
else:
_UpperCamelCase = 0
_UpperCamelCase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
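# Worked trace of the bottom-up recurrence on the 2x2 all-ones matrix above.
# The dp table carries one extra sentinel row/column of zeros, and each cell is
#   dp[r][c] = 1 + min(right, diagonal, down)   whenever mat[r][c] == 1.
# Filling from the bottom-right corner:
#   dp[1][1] = 1 + min(0, 0, 0)                      = 1
#   dp[1][0] = 1 + min(dp[1][1], 0, 0)               = 1
#   dp[0][1] = 1 + min(0, 0, dp[1][1])               = 1
#   dp[0][0] = 1 + min(dp[0][1], dp[1][1], dp[1][0]) = 2
# so the call above prints 2. Note that this is the *side length* of the
# largest square, not its area, despite what the function names suggest.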
| 287
| 1
|
'''simple docstring'''
from PIL import Image
def __lowercase ( __lowercase , __lowercase ) -> Image:
'''simple docstring'''
_A = (259 * (level + 255)) / (255 * (259 - level))
def contrast(__lowercase ) -> int:
return int(128 + factor * (c - 128) )
return img.point(__lowercase )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
lowerCamelCase_ = change_contrast(img, 1_70)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
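# Worked example of the contrast factor above for level = 170:
#   factor = (259 * (170 + 255)) / (255 * (259 - 170))
#          = (259 * 425) / (255 * 89)
#          = 110075 / 22695 ≈ 4.85
# Each pixel is then remapped as c -> int(128 + 4.85 * (c - 128)), so midtones
# stay put (c = 128 -> 128) while everything else is pushed away from the
# middle, e.g. c = 150 -> 234; PIL clamps the lookup results to 0-255.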
| 79
|
"""simple docstring"""
from collections import defaultdict
class lowerCamelCase :
def __init__( self : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ) -> Any:
SCREAMING_SNAKE_CASE__ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
SCREAMING_SNAKE_CASE__ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(__UpperCAmelCase ) )
]
SCREAMING_SNAKE_CASE__ = defaultdict(__UpperCAmelCase ) # stores the list of persons for each task
# final_mask has every one of the M person bits set to 1 and is used to check
# whether all persons have been assigned a task
SCREAMING_SNAKE_CASE__ = (1 << len(__UpperCAmelCase )) - 1
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] ) -> Optional[int]:
# if mask == self.final_mask, every person has been assigned a task; return 1
if mask == self.final_mask:
return 1
# if not everyone has a task yet and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if this case has already been computed, reuse the memoized value
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't include this task in the arrangement
SCREAMING_SNAKE_CASE__ = self.count_ways_until(__UpperCAmelCase , task_no + 1 )
# now assign this task to each eligible person in turn and recursively
# assign the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
SCREAMING_SNAKE_CASE__ = total_ways_util
return self.dp[mask][task_no]
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] ) -> List[str]:
# Store the list of persons for each task
for i in range(len(__UpperCAmelCase ) ):
for j in task_performed[i]:
self.task[j].append(__UpperCAmelCase )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
A_ : Any = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
A_ : Any = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
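# A small runnable example of the bitmask bookkeeping used above, with M = 3
# persons, so final_mask = (1 << 3) - 1 = 0b111:
mask = 0b000                 # nobody has been assigned a task yet
assert mask & (1 << 1) == 0  # person 1 is still free
mask |= 1 << 1               # assign person 1 -> 0b010
mask |= 1 << 2               # assign person 2 -> 0b110
assert mask != 0b111         # person 0 is still unassigned, so keep recursing
assert mask | (1 << 0) == (1 << 3) - 1  # assigning person 0 completes the mask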
| 165
| 0
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
class lowercase__ ( _UpperCamelCase ):
lowercase__ = CLIPConfig
lowercase__ = ['CLIPEncoderLayer']
def __init__( self : Any ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
super().__init__(_SCREAMING_SNAKE_CASE )
_UpperCamelCase : List[str] = CLIPVisionModelWithProjection(config.vision_config )
_UpperCamelCase : Tuple = nn.Linear(config.vision_config.projection_dim ,1 )
_UpperCamelCase : Optional[int] = nn.Linear(config.vision_config.projection_dim ,1 )
@torch.no_grad()
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str]=0.5 ,lowerCamelCase__ : Any=0.5 ):
'''simple docstring'''
_UpperCamelCase : int = self.vision_model(_SCREAMING_SNAKE_CASE )[0]
_UpperCamelCase : str = self.p_head(_SCREAMING_SNAKE_CASE )
_UpperCamelCase : Dict = nsfw_detected.flatten()
_UpperCamelCase : List[Any] = nsfw_detected > p_threshold
_UpperCamelCase : Optional[Any] = nsfw_detected.tolist()
if any(_SCREAMING_SNAKE_CASE ):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, nsfw_detected_ in enumerate(_SCREAMING_SNAKE_CASE ):
if nsfw_detected_:
_UpperCamelCase : List[str] = np.zeros(images[idx].shape )
_UpperCamelCase : Union[str, Any] = self.w_head(_SCREAMING_SNAKE_CASE )
_UpperCamelCase : List[str] = watermark_detected.flatten()
_UpperCamelCase : str = watermark_detected > w_threshold
_UpperCamelCase : Optional[Any] = watermark_detected.tolist()
if any(_SCREAMING_SNAKE_CASE ):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, watermark_detected_ in enumerate(_SCREAMING_SNAKE_CASE ):
if watermark_detected_:
_UpperCamelCase : Optional[Any] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
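# A minimal numpy sketch of the threshold-and-black-out pattern used above:
# per-image scores are compared against a threshold and flagged images are
# replaced with zeros. Purely illustrative - the real checker derives its
# scores from CLIP embeddings via the two linear heads.
import numpy as np


def black_out_flagged(images: np.ndarray, scores: np.ndarray, threshold: float = 0.5):
    flags = scores.flatten() > threshold
    for idx, flagged in enumerate(flags.tolist()):
        if flagged:
            images[idx] = np.zeros(images[idx].shape)
    return images, flags.tolist()


imgs, flags = black_out_flagged(np.ones((2, 4, 4, 3)), np.array([0.9, 0.1]))
assert flags == [True, False] and imgs[0].sum() == 0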
| 368
|
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
snake_case_ : Dict = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_UpperCamelCase : Optional[int] = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
_UpperCamelCase : Optional[Any] = config_class.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[str] = True
print(f'Building TensorFlow model from configuration: {config}' )
_UpperCamelCase : Any = model_class(UpperCAmelCase_ )
# Load weights from the PyTorch checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
_UpperCamelCase : Union[str, Any] = cached_file(
UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_UpperCamelCase : List[Any] = load_pytorch_checkpoint_in_tfa_model(UpperCAmelCase_ , UpperCAmelCase_ )
if compare_with_pt_model:
_UpperCamelCase : Optional[int] = tf_model(tf_model.dummy_inputs , training=UpperCAmelCase_ ) # build the network
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : List[str] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=UpperCAmelCase_ , config=UpperCAmelCase_ , state_dict=UpperCAmelCase_ )
with torch.no_grad():
_UpperCamelCase : Optional[Any] = pt_model(**pt_model.dummy_inputs )
_UpperCamelCase : int = pto[0].numpy()
_UpperCamelCase : Any = tfo[0].numpy()
_UpperCamelCase : Dict = np.amax(np.abs(np_pt - np_tf ) )
print(f'Max absolute difference between models outputs {diff}' )
assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}'
# Save the TensorFlow model
print(f'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(UpperCAmelCase_ , save_format='h5' )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=False , ):
if args_model_type is None:
_UpperCamelCase : List[Any] = list(MODEL_CLASSES.keys() )
else:
_UpperCamelCase : Tuple = [args_model_type]
for j, model_type in enumerate(UpperCAmelCase_ , start=1 ):
print('=' * 1_0_0 )
print(f' Converting model type {j}/{len(UpperCAmelCase_ )}: {model_type}' )
print('=' * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
_UpperCamelCase : List[Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
_UpperCamelCase : int = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(UpperCAmelCase_ , UpperCAmelCase_ ) , start=1 ):
print('-' * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
_UpperCamelCase : Dict = model_shortcut_name
elif only_convert_finetuned_models:
print(f' Skipping not finetuned checkpoint {model_shortcut_name}' )
continue
print(
f' Converting checkpoint {i}/{len(UpperCAmelCase_ )}: {model_shortcut_name} - model_type {model_type}' )
print('-' * 1_0_0 )
if config_shortcut_name in aws_config_map:
_UpperCamelCase : Any = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
else:
_UpperCamelCase : str = config_shortcut_name
if model_shortcut_name in aws_model_maps:
_UpperCamelCase : int = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models )
else:
_UpperCamelCase : List[str] = model_shortcut_name
if os.path.isfile(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=UpperCAmelCase_ , pytorch_checkpoint_path=UpperCAmelCase_ , config_file=UpperCAmelCase_ , tf_dump_path=os.path.join(UpperCAmelCase_ , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=UpperCAmelCase_ , )
if remove_cached_files:
os.remove(UpperCAmelCase_ )
os.remove(UpperCAmelCase_ )
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
snake_case_ : Optional[int] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
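# A minimal sketch of the numerical parity check performed inside
# convert_pt_checkpoint_to_tf above: run the same inputs through both models
# and compare the maximum absolute difference of their outputs. The arrays
# below are illustrative stand-ins for the two frameworks' logits.
import numpy as np


def max_abs_diff(pt_output: np.ndarray, tf_output: np.ndarray) -> float:
    return float(np.amax(np.abs(pt_output - tf_output)))


reference = np.array([0.10, -0.20, 0.30])
candidate = reference + 1e-3  # pretend this came from the converted model
diff = max_abs_diff(reference, candidate)
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"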
| 236
| 0
|
'''simple docstring'''
def __UpperCAmelCase ( a_: int, a_: Union[str, Any] = False ):
if not isinstance(A_, A_ ):
_UpperCAmelCase : str = f"""Expected string as input, found {type(A_ )}"""
raise ValueError(A_ )
if not isinstance(A_, A_ ):
_UpperCAmelCase : Dict = f"""Expected boolean as use_pascal parameter, found {type(A_ )}"""
raise ValueError(A_ )
_UpperCAmelCase : Optional[Any] = input_str.split("_" )
_UpperCAmelCase : str = 0 if use_pascal else 1
_UpperCAmelCase : Tuple = words[start_index:]
_UpperCAmelCase : Dict = [word[0].upper() + word[1:] for word in words_to_capitalize]
_UpperCAmelCase : Union[str, Any] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
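# Worked trace of the conversion above: "some_variable_name".split("_") gives
# ["some", "variable", "name"]. With use_pascal=False the start index is 1, so
# the first word is kept as-is and the rest are capitalized:
#   "some" + "Variable" + "Name" -> "someVariableName"
# With use_pascal=True the start index is 0 and every word is capitalized:
#   "Some" + "Variable" + "Name" -> "SomeVariableName"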
| 145
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
__magic_name__ = window_size
__magic_name__ = mlp_ratio
__magic_name__ = qkv_bias
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = drop_path_rate
__magic_name__ = hidden_act
__magic_name__ = use_absolute_embeddings
__magic_name__ = patch_norm
__magic_name__ = layer_norm_eps
__magic_name__ = initializer_range
__magic_name__ = is_training
__magic_name__ = scope
__magic_name__ = use_labels
__magic_name__ = type_sequence_label_size
__magic_name__ = encoder_stride
__magic_name__ = out_features
__magic_name__ = out_indices
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ )
__magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCamelCase__ ):
__magic_name__ = ["""stem"""]
__magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
def _lowercase ( self : Any ) -> Any:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__magic_name__ = outputs.hidden_states
__magic_name__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# Swin has a different seq_length
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
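# e.g. with image_size 32 and patch_size 4 this gives 32 + 4 - (32 % 4) = 36:
# the size is always bumped past the next patch boundary, even when it is
# already divisible, and these padded sizes are what the checks below receive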
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ):
__magic_name__ = 0
return t
def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
with torch.no_grad():
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()
def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
if isinstance(UpperCamelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
) , )
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _A ):
'''simple docstring'''
a__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
a__ = MaskFormerSwinConfig
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModelTester(self )
def _lowercase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__magic_name__ = backbone_class(UpperCamelCase__ )
backbone.to(UpperCamelCase__ )
backbone.eval()
__magic_name__ = backbone(**UpperCamelCase__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
self.assertIsNotNone(outputs.attentions )
| 88
| 0
|
from __future__ import annotations
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ):
'''simple docstring'''
if start is None:
snake_case_ = 0
if end is None:
snake_case_ = len(UpperCamelCase__ ) - 1
if start >= end:
return
snake_case_ = (start + end) // 2
slowsort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
slowsort(UpperCamelCase__ , mid + 1 , UpperCamelCase__ )
if sequence[end] < sequence[mid]:
snake_case_ , snake_case_ = sequence[mid], sequence[end]
slowsort(UpperCamelCase__ , UpperCamelCase__ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
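# Slowsort is a deliberately pessimal "multiply and surrender" algorithm: sort
# both halves recursively, swap the two candidate maxima so the largest element
# lands at the end, then recurse again on everything but that last element. Its
# cost satisfies T(n) = 2*T(n/2) + T(n-1) + O(1), which is not bounded by any
# polynomial. A tiny trace on [3, 1, 2]: sorting the halves yields [1, 3 | 2],
# the final comparison swaps 3 and 2 to give [1, 2, 3], and the prefix
# recursion confirms [1, 2] is already sorted.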
| 367
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=2 , snake_case=99 , snake_case=0 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=2 , snake_case=4 , snake_case="last" , snake_case=True , snake_case=None , snake_case=0 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_lengths
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = gelu_activation
snake_case_ = sinusoidal_embeddings
snake_case_ = causal
snake_case_ = asm
snake_case_ = n_langs
snake_case_ = vocab_size
snake_case_ = n_special
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = summary_type
snake_case_ = use_proj
snake_case_ = scope
snake_case_ = bos_token_id
def a ( self ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_input_lengths:
snake_case_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , 2 ).float()
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , lengths=snake_case , langs=snake_case )
snake_case_ = model(snake_case , langs=snake_case )
snake_case_ = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMWithLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForQuestionAnsweringSimple(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
snake_case_ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , )
snake_case_ = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , )
((snake_case_) , ) = result_with_labels.to_tuple()
snake_case_ = model(snake_case , start_positions=snake_case , end_positions=snake_case )
((snake_case_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = XLMForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = self.num_labels
snake_case_ = XLMForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = self.num_choices
snake_case_ = XLMForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def a ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def a ( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def a ( self ):
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
def a ( self ):
self.config_tester.run_common_tests()
def a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )
    def a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
    def a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
    def a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
    def a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
    def a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
    def a ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def a ( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def a ( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
def a ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def a ( self ):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 200
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Dict = "▁"
__A : int = {"vocab_file": "spiece.model"}
__A : str = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
__A : str = {
"google/pegasus-xsum": 512,
}
__A : Optional[int] = logging.get_logger(__name__)
class __snake_case ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self : Optional[int] , vocab_file : str , pad_token : int="<pad>" , eos_token : List[str]="</s>" , unk_token : Union[str, Any]="<unk>" , mask_token : str="<mask_2>" , mask_token_sent : Tuple="<mask_1>" , additional_special_tokens : List[str]=None , offset : Optional[int]=1_03 , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Tuple , ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F'additional_special_tokens should be of type {type(list )}, but is'
                    F' {type(additional_special_tokens )}' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F'<unk_{i}>' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
# add special tokens to encoder dict
        self.encoder : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def __lowercase ( self : List[Any] ) -> int:
return len(self.sp_model ) + self.offset
def __lowercase ( self : Optional[Any] ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
return state
    def __setstate__( self : Optional[Any] , d : Optional[Any] ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def __lowercase ( self : Dict , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def __lowercase ( self : Any , token : str ) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def __lowercase ( self : Any , index : int ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def __lowercase ( self : Any , tokens : Optional[Any] ) -> Optional[Any]:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def __lowercase ( self : Union[str, Any] , lowerCamelCase : int=False ) -> Optional[int]:
return 1
    def __lowercase ( self : List[Any] , seq : Optional[Any] ) -> List[str]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
    def __lowercase ( self : List[Any] , token_ids_a : List , token_ids_b : Optional[List] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def __lowercase ( self : Optional[int] , token_ids_a : Union[str, Any] , token_ids_b : List[Any]=None ) -> List[int]:
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def __lowercase ( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
| 120
|
'''simple docstring'''
from math import ceil
def solution( n : int = 10_01 ):
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
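# Worked check (added for illustration; values from the 5x5 example of the problem
# statement): the diagonals of a 5x5 spiral are 1, 3, 5, 7, 9, 13, 17, 21, 25 and
# sum to 101. Each ring with odd side length k = 2*i + 1 contributes
# 4*k**2 - 6*(k - 1), which is exactly the `4 * odd**2 - 6 * even` the loop adds.
assert solution(5) == 101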
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__A : str = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 120
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class A__ ( PretrainedConfig ):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=1_0000 , encoder_layers=12 , encoder_ffn_dim=2048 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6000 , max_target_positions=1024 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1024 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
                F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
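# Minimal usage sketch (illustrative comments only; this class is Speech2TextConfig,
# carried here under the corpus-mangled name `A__`). Thanks to the attribute_map
# above, the generic config names resolve to the speech-specific ones:
#
#   config = A__()                  # all defaults
#   config.hidden_size              # -> config.d_model == 256
#   config.num_attention_heads      # -> config.encoder_attention_heads == 4
#   len(config.conv_kernel_sizes)   # must equal config.num_conv_layers, else __init__ raises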
| 354
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 140
| 0
|
class A__ :
    def __init__( self , size ):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index ):
        return index | (index + 1)
    @staticmethod
    def get_prev( index ):
        return (index & (index + 1)) - 1
    def update( self , index , value ):
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value , self.tree[index] )
            index = self.get_next(index )
    def query( self , left , right ):
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
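# Usage sketch (illustrative; follows the method names used above):
#
#   tree = A__(5)       # five slots, arr and tree all zero
#   tree.update(2, 7)   # arr[2] = 7
#   tree.update(4, 3)   # arr[4] = 3
#   tree.query(0, 5)    # max over [0, 5) -> 7; note the right bound is exclusive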
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287
|
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def UpperCamelCase__ ( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 287
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
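# Note: this is the standard transformers lazy-import layout. At runtime the module is
# swapped for a _LazyModule that resolves names from _import_structure on first access,
# so importing the package does not pull in torch unless a torch-backed symbol is used;
# the TYPE_CHECKING branch gives static type checkers the eager imports instead.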
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 12
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_0_9
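# The hypothesis is linear in the three features: h(x) = p0 + p1*x1 + p2*x2 + p3*x3,
# with parameter_vector = [p0, p1, p2, p3]. For example, with the initial parameters
# [2, 4, 1, 5] the first training sample ((5, 2, 3), 15) gives
# h(x) = 2 + 4*5 + 1*2 + 5*3 = 39, i.e. an error of 39 - 15 = 24, which is what the
# derivative terms below accumulate (feature-weighted for each parameter).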
def _error( example_no, data_set="train" ):
    return calculate_hypothesis_value(example_no, data_set ) - output(
        example_no, data_set )
def _hypothesis_value( data_input_tuple ):
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output( example_no, data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no, data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index, end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    cost_derivative_value = summation_of_cost_derivative(index, m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i, "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 236
| 0
|
def a( bin_string : str ) -> str:
    """simple docstring"""
    if not all(char in "01" for char in bin_string ):
        raise ValueError("Non-binary value was passed to the function" )
    if not bin_string:
        raise ValueError("Empty string was passed to the function" )
    oct_string = ''
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
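# Example (illustrative): a("101001") needs no padding (its length is a multiple of
# 3), splits into ["101", "001"], and maps 101 -> 5 and 001 -> 1, returning "51".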
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: Dict = logging.get_logger()
@dataclass
class Tracker :
"""simple docstring"""
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def UpperCamelCase_ (self , m , inputs , outputs ):
        """simple docstring"""
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__(self , x ):
        """simple docstring"""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer :
"""simple docstring"""
    src : nn.Module
    dest : nn.Module
    verbose : int = 1
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    raise_if_mismatch : bool = True
def __call__(self , lowerCamelCase_ ):
"""simple docstring"""
        dest_traced = Tracker(self.dest )(lowerCamelCase_ ).parametrized
        src_traced = Tracker(self.src )(lowerCamelCase_ ).parametrized
        src_traced = list(filter(lambda x : type(x ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda x : type(x ) not in self.dest_skip , dest_traced ) )
        if len(src_traced ) != len(dest_traced ) and self.raise_if_mismatch:
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                F''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class FakeRegNetVisslWrapper ( nn.Module ):
"""simple docstring"""
    def __init__(self , model ):
        """simple docstring"""
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block" ), F'''Unexpected layer name {k}'''
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((F'''res{block_index}''', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
return get_trunk_forward_outputs(
lowerCamelCase_ , out_feat_keys=lowerCamelCase_ , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap ( dict ):
"""simple docstring"""
    def UpperCamelCase_ (self , x ):
        """simple docstring"""
        x_split = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__(self , x ):
        """simple docstring"""
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
return val
class NameToOurModelFuncMap ( dict ):
"""simple docstring"""
    def __getitem__(self , x ):
        """simple docstring"""
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict : Dict , to_state_dict : Dict , keys : List[Tuple[str, str]] ) -> Dict:
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def convert_weight_and_push( name : str , from_model_func : Callable[[], nn.Module] , our_model_func : Callable[[], nn.Module] , config : RegNetConfig , save_directory : Path , push_to_hub : bool = True , ) -> List[str]:
    """simple docstring"""
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
            our_model.load_state_dict(to_state_dict )
        our_outputs = our_model(x , output_hidden_states=True )
        our_output = (
            our_outputs.logits if isinstance(our_outputs , ImageClassifierOutputWithNoAttention ) else our_outputs.last_hidden_state
        )
        from_output = from_model(x )
        from_output = from_output[-1] if type(from_output ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]
        assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=True , )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=True , )
print(f'''Pushed {name}''' )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ) -> Dict:
"""simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url : str , model_func : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location="cpu" )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
_lowercase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowercase: Optional[int] = parser.parse_args()
_lowercase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 71
| 0
|
'''simple docstring'''
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase_ ( PipelineTool ):
    default_checkpoint = """facebook/bart-large-mnli"""
    description = (
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    name = """text_classifier"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["""text""", ["""text"""]]
    outputs = ["""text"""]
    def _lowercase( self ) -> str:
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
    def _lowercase( self , text , labels ) -> Dict:
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
    def _lowercase( self , outputs ) -> Optional[Any]:
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
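# Usage sketch (illustrative): the tool scores the text against each candidate label by
# phrasing it as the NLI hypothesis "This example is {label}" and returns the label with
# the highest entailment logit, e.g.
#
#   classifier = UpperCamelCase_()
#   classifier("This is a really nice API!", labels=["positive", "negative"])  # -> "positive"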
| 265
|
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( model , model_file ):
    """simple docstring"""
    try:
        with open(model_file , """rb""" ) as flax_state_f:
            flax_state = from_bytes(model , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("""version""" ):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
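    # Each Flax parameter is renamed/reshaped below to its PyTorch counterpart:
    #   - a 4D "kernel" is a conv weight: renamed to "weight", transposed HWIO -> OIHW via (3, 2, 0, 1)
    #   - a 2D "kernel" is a linear weight: renamed to "weight" and transposed
    #   - a "scale" is a norm weight: renamed to "weight"
    # and "_<digit>" path segments become ".<digit>" to match PyTorch module lists.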
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
        flax_key = """.""".join(flax_key_tuple_array )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
return pt_model
| 200
| 0
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count( message: str ) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x: tuple ) -> str:
    return x[0]
def get_frequency_order( message: str ) -> str:
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message: str ) -> int:
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
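# Example (illustrative): ordinary English prose scores close to the maximum of 12,
# because the six most and six least frequent letters of the text largely coincide
# with the two ends of ETAOIN; a wrongly decrypted ciphertext scores much lower,
# which is what makes this score useful for ranking candidate decryptions.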
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
from math import ceil
def assert_device_map( device_map , num_blocks ):
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks ) )
def get_device_map( n_layers , devices ):
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
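# Usage sketch (illustrative): splitting a 24-layer model across 4 devices.
#
#   device_map = get_device_map(n_layers=24, devices=[0, 1, 2, 3])
#   # -> {0: [0, ..., 5], 1: [6, ..., 11], 2: [12, ..., 17], 3: [18, ..., 23]}
#   assert_device_map(device_map, num_blocks=24)   # raises on duplicate/missing/extra blocks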
| 260
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCAmelCase = False
@skip_mps
class __magic_name__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
    def __lowercase ( cls : Tuple ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def __lowercase ( cls : List[str] ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def __lowercase ( self : str ):
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=True ,)
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 89
|
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """Configuration class for the BertAbs encoder-decoder summarization model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
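# Illustrative usage (a sketch; assumes the BertAbsConfig name restored above):
# config = BertAbsConfig(dec_layers=4)         # unspecified fields keep their defaults
# config.model_type, config.dec_hidden_size    # -> ("bertabs", 768)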
| 140
| 0
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Downloads up to the most recent tweets of a user into a CSV file."""
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
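# Note (an alternative sketch, assuming a tweepy v1.1-style client): the manual
# max_id pagination above can also be expressed with tweepy's Cursor helper, e.g.
# for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#     ...
# Either way, the user_timeline endpoint only returns roughly the 3200 most recent tweets.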
| 351
|
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts the list in place by exchanging out-of-order pairs, then returns it."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
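# Worked example (illustrative): exchange_sort([5, 2, 9, 1]) -> [1, 2, 5, 9];
# pass i fixes position i by swapping in any smaller element found to its right.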
| 133
| 0
|
def validate_initial_digits(credit_card_number: str) -> bool:
    """Returns True if the leading digits match a supported card network."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Returns True if the number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Prints a diagnostic message and returns whether the number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 339
|
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
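# Sanity values (illustrative): the recursion yields 3, 8, 19, 43 prize strings
# for 1-4 day periods, e.g. solution(4) == 43; solution() evaluates the 30-day case.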
| 339
| 1
|
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # take items best-first according to key_func while they fit in the budget
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
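# Illustrative usage (a sketch; the menu below is made up for the example):
# foods = build_menu(["Pizza", "Cola", "Burger", "Rice"], [100, 60, 80, 70], [10, 20, 40, 70])
# greedy(foods, 60, Things.value_weight)  # -> ([Things(Pizza, ...), Things(Cola, ...)], 160.0)
# Sorting by value/weight ratio picks Pizza (ratio 10) and Cola (ratio 3) before
# the 60-unit weight budget runs out; Burger (weight 40) no longer fits.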
| 54
|
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y) on [x0, x_end]."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
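# Illustrative check (a sketch): integrating dy/dx = y with y(0) = 1 up to x = 1
# approximates e; with step h = 0.1 the endpoint agrees with e to about 1e-5:
# runge_kutta(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1]  # ~2.71828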
| 54
| 1
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
def __init__( self , _snake_case , _snake_case=100 , _snake_case=13 , _snake_case=30 , _snake_case=2 , _snake_case=3 , _snake_case=True , _snake_case=True , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=10 , _snake_case=0.02 , _snake_case=3 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = vocab_size
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = num_patches + 1
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = FlaxBeitModel(config=_snake_case )
_lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = FlaxBeitForMaskedImageModeling(config=_snake_case )
_lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = FlaxBeitForImageClassification(config=_snake_case )
_lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = FlaxBeitForImageClassification(_snake_case )
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_snake_case )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxBeitModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case )
_lowerCAmelCase = model_class(_snake_case )
@jax.jit
def model_jitted(_snake_case , **_snake_case ):
return model(pixel_values=_snake_case , **_snake_case )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase = model_jitted(**_snake_case ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase = model_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowerCAmelCase = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
_lowerCAmelCase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_snake_case )
def prepare_img():
"""simple docstring"""
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
@cached_property
def snake_case ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_snake_case , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
_lowerCAmelCase = np.ones((1, 196) , dtype=_snake_case )
# forward pass
_lowerCAmelCase = model(pixel_values=_snake_case , bool_masked_pos=_snake_case )
_lowerCAmelCase = outputs.logits
# verify the logits
_lowerCAmelCase = (1, 196, 8192)
self.assertEqual(logits.shape , _snake_case )
_lowerCAmelCase = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _snake_case , atol=1e-2 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_snake_case , return_tensors="""np""" )
# forward pass
_lowerCAmelCase = model(**_snake_case )
_lowerCAmelCase = outputs.logits
# verify the logits
_lowerCAmelCase = (1, 1000)
self.assertEqual(logits.shape , _snake_case )
_lowerCAmelCase = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , _snake_case , atol=1e-4 ) )
_lowerCAmelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , _snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_snake_case , return_tensors="""np""" )
# forward pass
_lowerCAmelCase = model(**_snake_case )
_lowerCAmelCase = outputs.logits
# verify the logits
_lowerCAmelCase = (1, 21841)
self.assertEqual(logits.shape , _snake_case )
_lowerCAmelCase = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , _snake_case , atol=1e-4 ) )
_lowerCAmelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , _snake_case )
| 82
|
def pancake_sort(arr):
    """Sorts a list by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, sinking the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
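# Illustrative trace: pancake_sort([3, 1, 2])
# cur=3: the maximum 3 sits at index 0, flipping it to the front is a no-op, then
# flipping the first three elements gives [2, 1, 3]; cur=2: flipping the first
# two gives [1, 2, 3], which is sorted.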
| 71
| 0
|
import math
def res(x, y):
    """Returns log10(x ** y) for positive inputs, with the 0-base/0-power special cases."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 358
|
from ...processing_utils import ProcessorMixin
class __magic_name__ ( snake_case ):
UpperCamelCase_ :str = """SpeechT5FeatureExtractor"""
UpperCamelCase_ :Optional[int] = """SpeechT5Tokenizer"""
def __init__( self , _lowercase , _lowercase )-> Union[str, Any]:
super().__init__(_lowercase , _lowercase )
def __call__( self , *_lowercase , **_lowercase )-> Tuple:
UpperCamelCase_ = kwargs.pop("audio" , _lowercase )
UpperCamelCase_ = kwargs.pop("text" , _lowercase )
UpperCamelCase_ = kwargs.pop("text_target" , _lowercase )
UpperCamelCase_ = kwargs.pop("audio_target" , _lowercase )
UpperCamelCase_ = kwargs.pop("sampling_rate" , _lowercase )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
UpperCamelCase_ = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
elif text is not None:
UpperCamelCase_ = self.tokenizer(_lowercase , **_lowercase )
else:
UpperCamelCase_ = None
if audio_target is not None:
UpperCamelCase_ = self.feature_extractor(audio_target=_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
UpperCamelCase_ = targets["input_values"]
elif text_target is not None:
UpperCamelCase_ = self.tokenizer(_lowercase , **_lowercase )
UpperCamelCase_ = targets["input_ids"]
else:
UpperCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
UpperCamelCase_ = labels
UpperCamelCase_ = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> Optional[int]:
UpperCamelCase_ = kwargs.pop("input_values" , _lowercase )
UpperCamelCase_ = kwargs.pop("input_ids" , _lowercase )
UpperCamelCase_ = kwargs.pop("labels" , _lowercase )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
UpperCamelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
elif input_ids is not None:
UpperCamelCase_ = self.tokenizer.pad(_lowercase , **_lowercase )
else:
UpperCamelCase_ = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowercase , _lowercase ) and "input_ids" in labels[0]):
UpperCamelCase_ = self.tokenizer.pad(_lowercase , **_lowercase )
UpperCamelCase_ = targets["input_ids"]
else:
UpperCamelCase_ = self.feature_extractor.feature_size
UpperCamelCase_ = self.feature_extractor.num_mel_bins
UpperCamelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
UpperCamelCase_ = feature_size_hack
UpperCamelCase_ = targets["input_values"]
else:
UpperCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
UpperCamelCase_ = labels
UpperCamelCase_ = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> int:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> int:
return self.tokenizer.decode(*_lowercase , **_lowercase )
| 60
| 0
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 90
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # stretch the sigmoid scores to [-0.1, 1.1] and clamp to [0, 1]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
| 260
| 0
|
def interpolation_search(sorted_collection, item):
    """Searches `item` in an ascending-sorted list by interpolating the probe index."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
__snake_case :Dict = 0
if debug == 1:
__snake_case :Dict = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
__snake_case :List[str] = 67
__snake_case :Dict = interpolation_search(collection, target)
if result is not None:
print(f'{target} found at positions: {result}')
else:
print('''Not found''')
| 362
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 131
| 0
|
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
# prep
assert os.path.exists(__A )
os.makedirs(__A , exist_ok=__A )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
_snake_case = basename(__A )
_snake_case = dirname(__A )
_snake_case = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_snake_case = cls.hub_models()
_snake_case = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
_snake_case = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
_snake_case = hub_utils.from_pretrained(
__A , __A , __A , archive_map=__A , **__A )
_snake_case = vars(chkpt['args']['model'] )
_snake_case = args['source_lang']
_snake_case = args['target_lang']
_snake_case = dirname(__A )
_snake_case = basename(__A )
# dicts
_snake_case = os.path.join(__A , F'dict.{src_lang}.txt' )
_snake_case = os.path.join(__A , F'dict.{tgt_lang}.txt' )
_snake_case = Dictionary.load(__A )
_snake_case = rewrite_dict_keys(src_dict.indices )
_snake_case = len(__A )
_snake_case = os.path.join(__A , 'vocab-src.json' )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_snake_case = True
for k in src_vocab.keys():
if not k.islower():
_snake_case = False
break
_snake_case = Dictionary.load(__A )
_snake_case = rewrite_dict_keys(tgt_dict.indices )
_snake_case = len(__A )
_snake_case = os.path.join(__A , 'vocab-tgt.json' )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# merges_file (bpecodes)
_snake_case = os.path.join(__A , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_snake_case = os.path.join(__A , __A )
if os.path.exists(__A ):
break
with open(__A , encoding='utf-8' ) as fin:
_snake_case = fin.read()
_snake_case = re.sub(r' \d+$' , '' , __A , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(__A , 'w' , encoding='utf-8' ) as fout:
fout.write(__A )
# model config
_snake_case = os.path.join(__A , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
_snake_case = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.0_2,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
_snake_case = 5
_snake_case = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_snake_case = best_score_hparams[model_dir]['length_penalty']
else:
_snake_case = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# tokenizer config
_snake_case = os.path.join(__A , __A )
_snake_case = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1_024,
'do_lower_case': do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# model
_snake_case = chkpt['models'][0]
_snake_case = model.state_dict()
# rename keys to start with 'model.'
_snake_case = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_snake_case = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(__A , __A )
_snake_case = FSMTConfig.from_pretrained(__A )
_snake_case = FSMTForConditionalGeneration(__A )
# check that it loads ok
model_new.load_state_dict(__A , strict=__A )
# save
_snake_case = os.path.join(__A , __A )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(__A , __A )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase : Any = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 42
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 133
| 0
|
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # floor division keeps n an int
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
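# Why b**(p - 2) acts as a modular inverse (sketch): Fermat's little theorem gives
# b**(p - 1) % p == 1 for prime p with gcd(b, p) == 1, hence b**(p - 2) is b's
# inverse modulo p. e.g. (10 * binary_exponentiation(10, 699, 701)) % 701 == 1.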
| 362
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
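# Illustrative invocation (a sketch; the actual script file name is hypothetical):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16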
| 247
| 0
|
"""simple docstring"""
def solution(limit=1_000_000):
    """Sum Euler's totient phi(n) over 2 <= n <= limit, via a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
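# Quick sanity check (illustrative, not part of the original solution): for limit = 8,
# phi(2..8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, so solution(8) should return 21.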
if __name__ == "__main__":
print(F"{solution() = }")
| 54
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085",  # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 54
| 1
|
"""simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
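# The relative `Stack` import above assumes this file lives inside its package; when
# experimenting standalone, a minimal stand-in (an illustrative sketch) would suffice:
#
#     class Stack:
#         def __init__(self): self._items = []
#         def push(self, item): self._items.append(item)
#         def pop(self): return self._items.pop()
#         def peek(self): return self._items[-1]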
if __name__ == "__main__":
equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 272
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models on CNN/DailyMail stories."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Split a CNN/DailyMail story into article lines and `@highlight` summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Trim the sequence to `block_size`, or pad it with `pad_token_id` if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    """Build the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode story and summary lines and flatten them into single token-id sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Alternate 0/1 segment ids at every separator token in each sequence."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
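# Illustrative use of process_story (hypothetical story text, not from the original file):
#   raw = "First sentence.\n\n@highlight\n\nThe summary line"
#   story, summary = process_story(raw)
#   # story == ["First sentence."], summary == ["The summary line."]  (period added)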
| 272
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
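# Illustrative usage (a sketch; checkpoint name and the `waveform`/`predicted_ids`
# tensors are hypothetical examples):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   transcription = processor.batch_decode(predicted_ids)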
| 308
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil_image(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 60
| 0
|
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
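# Expected output (sketch): all C(5, 3) = 10 combinations, starting with
#   10 20 30 / 10 20 40 / 10 20 50 / 10 30 40 / ...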
| 371
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
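# Illustrative run (a sketch): a strictly diagonally dominant 2x2 system.
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[1.0], [2.0]])
#   jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=25)
# converges toward the exact solution [1/11, 7/11] ~= [0.0909, 0.6364].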
| 140
| 0
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array, index1, index2, direction):
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array, low, length, direction):
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array, low, length, direction):
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
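# Note: bitonic sort assumes the input length is a power of two. Illustrative call:
#   data = [12, 42, -21, 1]; bitonic_sort(data, 0, len(data), 1)  ->  [-21, 1, 12, 42]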
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 293
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
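# Illustrative usage (a sketch): prefix sums over [1, 2, 3, 4].
#   f = FenwickTree(arr=[1, 2, 3, 4])
#   f.prefix(3)    -> 6   (1 + 2 + 3)
#   f.add(1, 10)   # underlying array becomes [1, 12, 3, 4]
#   f.query(1, 3)  -> 15  (12 + 3)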
| 131
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
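# Illustrative call (hypothetical repo and path):
#   hf_hub_url("user/my-dataset", "data/train.csv")
# returns the fully resolved https URL for that dataset file on the Hub.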
| 368
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
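# Illustrative usage (a sketch): build a default configuration and inspect adapter settings.
#   config = XmodConfig()
#   config.adapter_reduction_factor  -> 2
#   config.languages                 -> ["en_XX"]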
| 116
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
def A_ ( self : Optional[Any] ) -> List[str]:
# fmt: off
lowerCamelCase__ : Tuple = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
| 50
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
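# Illustrative usage (a sketch; `waveform` is a hypothetical audio array):
#   tool = SpeechToTextTool()
#   transcript = tool(waveform)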
| 247
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
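# The deprecated subclass keeps old imports working, e.g. (illustrative):
#   extractor = VideoMAEFeatureExtractor()  # warns, then behaves like VideoMAEImageProcessor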
| 52
|
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
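# Illustrative sanity checks (not part of the original file):
#   is_prime(97) -> True        is_prime(1) -> False
#   gcd(48, 36) -> 12           simplify_fraction(10, 20) -> (1, 2)
#   goldbach(28) -> [5, 23]     fib(5) -> 8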
| 52
| 1
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string
    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the candidate with the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="\n"):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(__lowerCAmelCase) > 0:
words.append(bytearray(__lowerCAmelCase).decode("""utf-8""" , errors="""replace"""))
lowerCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word])
elif word == "<SP>":
words.append(""" """)
elif word == "<BR>":
words.append(__lowerCAmelCase)
elif word == "<TAB>":
words.append("""\t""")
elif word == "<BLOCK>":
words.append("""▀""")
elif word == "<KIGOU>":
words.append("""ǀ""")
elif word == "<U2000U2BFF>":
words.append("""‖""")
else:
words.append(__lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
words.append(bytearray(__lowerCAmelCase).decode("""utf-8""" , errors="""replace"""))
lowerCAmelCase = """""".join(__lowerCAmelCase)
return text
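
# --- Illustrative usage (a sketch, not part of the original file) ---
# Assuming `vocab`, `ids_to_tokens` and `emoji` have already been loaded from the
# vocabulary and emoji files, the sub-word tokenizer can be exercised like this:
#
#     subword_tokenizer = SubWordJapaneseTokenizer(vocab, ids_to_tokens, emoji)
#     tokens = subword_tokenizer.tokenize("こんにちは\n世界", clean=True)
#     text = "".join(subword_tokenizer.convert_id_to_token(vocab[t]) for t in tokens)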
"""Utilities for parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag that marks arrow-key codes

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
__lowercase = []
__lowercase = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
__lowercase = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
"""Dijkstra's shortest-path algorithm on a binary grid, using a priority queue."""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Returns (distance, path) from `source` to `destination`, where cells equal to 1 are walkable."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
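
# --- Illustrative usage (a sketch, not part of the original file) ---
#
#     grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#     dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#     # expected: dist == 4.0, path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]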
"""Zero-shot image classification pipeline."""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline: predicts the class of an image given candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Assigns labels to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
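
# --- Illustrative usage (a sketch, not part of the original file; file name is a placeholder) ---
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#     # -> [{"score": ..., "label": "cat"}, ...]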
"""Project Euler problem 234 (semidivisible numbers): https://projecteuler.net/problem=234"""
import math
def prime_sieve(n: int) -> list:
    """Returns the list of primes below `n` using a sieve of Eratosthenes over odd numbers."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
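
# Quick sanity check (illustrative, not part of the original file):
#
#     >>> prime_sieve(20)
#     [2, 3, 5, 7, 11, 13, 17, 19]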
def solution(limit: int = 999966663333) -> int:
    """Returns the sum of all semidivisible numbers not exceeding `limit`."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """The mask token; logs an error if used while not having been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
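
# --- Illustrative usage (a sketch, not part of the original file) ---
#
#     from transformers import BartTokenizerFast
#
#     tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     enc = tok("Hello world", return_tensors="pt")
#     # enc["input_ids"] starts with <s> and ends with </s>, per
#     # build_inputs_with_special_tokens above.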
"""Tests for the AutoTokenizer class."""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # Checks model names with dashes or non 1-1 name-to-file mappings still resolve to a tokenizer class.
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no converter slow to fast for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
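
# --- Illustrative invocation (a sketch; the script name and file paths are placeholders) ---
#
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path /path/to/yoso.ckpt \
#         --config_file /path/to/yoso_config.json \
#         --pytorch_dump_path /path/to/output_dir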
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
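
# --- Illustrative usage (a sketch, not part of the original test file) ---
#
#     scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rougeL"])
#     print(scores["rouge1"], scores["rougeL"])  # aggregated scores per key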
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
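
# --- Illustrative note (not part of the original file) ---
# With the `_LazyModule` in place, heavy framework code is only imported on first attribute access:
#
#     from transformers.models.funnel import FunnelConfig  # cheap, config only
#     from transformers.models.funnel import FunnelModel   # triggers the torch modeling import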
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed or compiled wrappers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()`."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Adds each keyword argument to `os.environ` (upper-cased) and removes them again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Gets a pretty name from `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merges `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None) -> bool:
    """Checks whether a port is already in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
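
# --- Illustrative usage (a sketch, not part of the original file) ---
# `wrapped_model` below is a placeholder name for a DDP/DeepSpeed-wrapped model.
#
#     with patch_environment(master_addr="127.0.0.1", master_port=29501):
#         ...  # code here sees MASTER_ADDR / MASTER_PORT; both are removed afterwards
#
#     unwrapped = extract_model_from_parallel(wrapped_model)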
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by `pad_token_id`."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
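# Usage sketch (illustrative; the path and tokenizer name below are
# placeholders, not from the original file): the dataset pairs line-aligned
# .source/.target files, and collate_fn both stacks and pad-trims each batch.
# >>> from torch.utils.data import DataLoader
# >>> from transformers import BartTokenizer
# >>> tok = BartTokenizer.from_pretrained("facebook/bart-base")
# >>> ds = Seq2SeqDataset(tok, "data_dir", max_source_length=64, max_target_length=64, type_path="val")
# >>> loader = DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)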
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
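# Worked example (sketch): the normalizer lowercases, strips punctuation,
# drops the articles a/an/the, and collapses runs of whitespace.
# >>> normalize_answer("The  quick, brown fox!")
# 'quick brown fox'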
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
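# Worked example (sketch): "new york city" vs. "york city" share two tokens,
# so precision = 2/3, recall = 2/2, and F1 = 2*p*r/(p+r) = 0.8.
# >>> round(f1_score("new york city", "york city"), 2)
# 0.8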
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            param = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, param, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
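# Usage sketch (not from the original file; throwaway namespaces make the
# dropout -> dropout_rate remapping for T5-style configs easy to see):
# >>> from types import SimpleNamespace
# >>> hp = SimpleNamespace(dropout=0.1)
# >>> cfg = SimpleNamespace(dropout_rate=0.0)
# >>> _, cfg = set_extra_model_params(("dropout",), hp, cfg)
# >>> cfg.dropout_rate
# 0.1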
| 52
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
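# Quick illustration (sketch) of what _re_checkpoint extracts from a docstring:
# >>> _re_checkpoint.findall("See [bert-base-uncased](https://huggingface.co/bert-base-uncased).")
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]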
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 52
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
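# Note (editorial sketch, not part of the original file): with the _LazyModule
# pattern above, `import transformers.models.jukebox` stays cheap; the
# torch-backed classes listed in _import_structure are only imported when an
# attribute is first accessed, e.g.
# >>> from transformers.models.jukebox import JukeboxConfig  # triggers the lazy load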
| 362
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing `GenerationConfig` values by dictionaries so the result is
        JSON-serializable.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
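# Usage sketch (hypothetical values, not from the original file): enabling
# generation-based evaluation and serializing the arguments.
# >>> args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)
# >>> args.to_dict()["generation_num_beams"]
# 4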
| 264
| 0
|
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = (moles / volume) * n-factor, rounded."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: P = nRT / V, with R = 0.0821 L*atm/(mol*K), rounded."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: V = nRT / P, with R = 0.0821 L*atm/(mol*K), rounded."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: T = PV / (nR), with R = 0.0821 L*atm/(mol*K), rounded."""
    return round(float((pressure * volume) / (0.0821 * moles)))
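# Worked example (sketch): 1 mol of an ideal gas at 300 K in a 1 L vessel gives
# P = (1 * 0.0821 * 300) / 1 = 24.63, which rounds to 25 atm.
# >>> moles_to_pressure(volume=1, moles=1, temperature=300)
# 25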
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
|
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/google-research/bleurt""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/google-research/bleurt"""] ,reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] ,)
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 37
| 1
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""wmt16-en-de-dist-12-1""": [28.3, 27.52],
"""wmt16-en-de-dist-6-1""": [27.4, 27.11],
"""wmt16-en-de-12-1""": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 141
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
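# Expected input format (a sketch inferred from the parsing above): each line of
# --correct_filename carries four semicolon-separated fields,
#     <file>;<class name>;<test name>;<corrected source line>
# and --fail_filename, when given, lists the failing tests as file::class::test.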
| 141
| 1
|