| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Reads a pyspark.sql.DataFrame into a Hugging Face dataset."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
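# A minimal usage sketch (illustrative only; in practice this reader is driven by the
# public `Dataset.from_spark` API rather than instantiated directly):
#
#     spark = pyspark.sql.SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([("a",), ("b",)], ["text"])
#     ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_cache").read()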
| 140
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
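# Quick sanity check for the renaming chain above (illustrative trace, not part of
# the original script): an encoder attention projection key
#     rename_key("blocks.0.attn.proj.weight")
# first becomes "videomae.encoder.layer.0.attn.proj.weight" via the "blocks" rule,
# then the "attn.proj" rule yields "videomae.encoder.layer.0.attention.output.dense.weight".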
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    # split the fused qkv projection into separate query/key/value weights
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
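# Shape intuition for the qkv split above (illustrative): the fused qkv projection
# weight has shape (3 * dim, dim); its three row blocks val[:dim], val[dim : dim * 2]
# and val[-dim:] are the query, key and value weights respectively.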
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
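# Example invocation (illustrative; the script file name below is an assumption):
#
#     python convert_videomae_to_pytorch.py \
#         --model_name videomae-base \
#         --pytorch_dump_folder_path ./videomae-base-converted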
| 140
| 1
|
"""simple docstring"""
import math
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = len(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = int(math.floor(math.sqrt(_lowerCamelCase ) ) )
lowerCamelCase__ : Tuple = 0
while arr[min(_lowerCamelCase , _lowerCamelCase ) - 1] < x:
lowerCamelCase__ : Union[str, Any] = step
step += int(math.floor(math.sqrt(_lowerCamelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCamelCase__ : int = prev + 1
if prev == min(_lowerCamelCase , _lowerCamelCase ):
return -1
if arr[prev] == x:
return prev
return -1
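# Worked example (illustrative): for arr = [1, 3, 5, 7, 9] and x = 7 the block size is
# floor(sqrt(5)) = 2, so the probes visit arr[1] = 3 and arr[3] = 7, the jump phase
# stops, and the linear scan from index 2 returns index 3.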
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
| 696
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""Constructs a Speech2Text processor which wraps a feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
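# A minimal usage sketch (assuming the pretrained checkpoint
# "facebook/s2t-small-librispeech-asr" and a 16 kHz float waveform `speech`):
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
#     labels = processor(text="a transcription", return_tensors="pt").input_ids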
| 696
| 1
|
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 0
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Check whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generate the sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 0
| 1
|
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the Greatest Common Divisor (GCD) recursively."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the GCD iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Call the GCD functions on user input."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
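# Worked example (illustrative): greatest_common_divisor(24, 40) recurses as
# gcd(24, 40) -> gcd(16, 24) -> gcd(8, 16) -> gcd(0, 8) = 8, and gcd_by_iterative(24, 40)
# reaches the same result through the pairs (40, 24) -> (24, 16) -> (16, 8) -> (8, 0).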
| 174
|
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count perimeters below the limit that are formed by exactly one Pythagorean triple,
    generating primitive triples via Euclid's formula."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
| 174
| 1
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
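# Illustrative trace (not part of the original script): for a conditioner layer-norm key,
#     replace_key("conditioner_blocks.0.cond.ln")
# first collapses "conditioner_blocks.0" to "conditioner_blocks", then the ".ln" rule
# returns "conditioner_blocks.cond.layer_norm".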
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 37
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    r"""Constructs a ConvNeXT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
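# A minimal usage sketch (assuming PIL is available and a checkpoint such as
# "facebook/convnext-tiny-224", whose config sets a 224-pixel shortest edge):
#
#     from PIL import Image
#     processor = ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#     pixel_values = processor(Image.open("cat.png"), return_tensors="pt").pixel_values
#     # resized to 256, center-cropped to 224 -> shape (1, 3, 224, 224)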
| 639
| 0
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 189
|
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Run breadth first search from the source vertex, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return a string describing the shortest path from the source to target_vertex."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
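# Expected behaviour of the demo above (illustrative): with source vertex "G",
# g.shortest_path("D") returns "G->C->A->B->D", g.shortest_path("G") returns "G",
# and g.shortest_path("Foo") raises ValueError because "Foo" is unreachable.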
| 189
| 1
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
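# Minimal usage sketch (iteration counts are illustrative, not from the original
# script):
#
#     pi_estimator(100_000)
#     area_under_line_estimator_check(100_000)
#     pi_estimator_using_area_under_curve(100_000)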
| 10
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
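# Minimal usage sketch. The concrete filter class below is an assumption (any
# object satisfying the FilterType protocol works); it is not defined in this
# module:
#
#     flt = IIRFilter(2)  # hypothetical FilterType implementation
#     show_frequency_response(flt, 48_000)
#     show_phase_response(flt, 48_000)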
| 597
| 0
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 706
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
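# Example invocation (illustrative; the script filename is an assumption, the
# flags come from parse_args above):
#
#     python run_stable_diffusion_inference.py -m ./sd-model-dir -c "robotic cat with wings" -n 4 -s 42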
| 226
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 423
| 0
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 376
|
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
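# Design note: re-sorting the open list via Node.__lt__ on every expansion keeps
# the code short; a heapq-based priority queue would be the more efficient choice
# for larger grids.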
| 376
| 1
|
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if number and number + 2 form a twin prime pair, -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
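# Examples, following directly from the definition (5 and 7 are both prime; 4 is
# not prime):
#
#     twin_prime(5)  # -> 7
#     twin_prime(4)  # -> -1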
| 317
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
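# Minimal usage sketch (keyword values mirror the defaults in the signature above):
#
#     configuration = ASTConfig(num_mel_bins=128, max_length=1024)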
| 317
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCamelCase )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 705
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 52
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 141
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = self.hparams
A_ = processors[args.task]()
A_ = processor.get_labels()
for mode in ["train", "dev"]:
A_ = self._feature_file(a__ )
if os.path.exists(a__ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , a__ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
A_ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
A_ = convert_examples_to_features(
a__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(a__ , a__ )
def lowerCAmelCase_ ( self , a__ , a__ , a__ = False ) -> DataLoader:
'''simple docstring'''
A_ = '''dev''' if mode == '''test''' else mode
A_ = self._feature_file(a__ )
logger.info('''Loading features from cached file %s''' , a__ )
A_ = torch.load(a__ )
A_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
A_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
A_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
A_ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
A_ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a__ , a__ , a__ , a__ ) , batch_size=a__ , shuffle=a__ , )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase_ ( self , a__ ) -> tuple:
'''simple docstring'''
A_ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
A_ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
A_ = np.argmax(a__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
A_ = np.squeeze(a__ )
A_ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
A_ = [[] for _ in range(out_label_ids.shape[0] )]
A_ = [[] for _ in range(out_label_ids.shape[0] )]
A_ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , a__ , a__ )}
A_ = dict(results.items() )
A_ = results
return ret, preds_list, out_label_list
def lowerCAmelCase_ ( self , a__ ) -> dict:
'''simple docstring'''
A_ , A_ , A_ = self._eval_end(a__ )
A_ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase_ ( self , a__ ) -> dict:
'''simple docstring'''
A_ , A_ , A_ = self._eval_end(a__ )
A_ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase_ ( a__ , a__ ) -> Dict:
'''simple docstring'''
BaseTransformer.add_model_specific_args(a__ , a__ )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=a__ , required=a__ , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=a__ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 141
| 1
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1_024,
    "facebook/esm2_t12_35M_UR50D": 1_024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
| 706
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Optional[Any]="<s>" , UpperCAmelCase_ : int="<unk>" , UpperCAmelCase_ : int="<pad>" , UpperCAmelCase_ : Tuple="<mask>" , UpperCAmelCase_ : int=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
a : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
a : Tuple = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
a : str = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
a : Optional[Any] = len(self.fairseq_tokens_to_ids)
a : List[str] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a : Optional[int] = [self.cls_token_id]
a : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
a : List[Any] = [self.sep_token_id]
a : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : int = {self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(UpperCAmelCase_) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : List[str] = []
a : List[str] = ''
a : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_) + token
a : Tuple = True
a : Optional[Any] = []
else:
current_sub_tokens.append(UpperCAmelCase_)
a : int = False
out_string += self.sp_model.decode(UpperCAmelCase_)
return out_string.strip()
def __getstate__( self : Union[str, Any]):
"""simple docstring"""
a : str = self.__dict__.copy()
a : List[Any] = None
return state
def __setstate__( self : List[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a : Tuple = {}
a : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , 'wb') as fi:
a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
| 610
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : int ) -> Tuple:
"""simple docstring"""
_a : str = tmp_path / '''cache'''
_a : List[Any] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : List[Any] = TextDatasetReader(__a ,cache_dir=__a ,keep_in_memory=__a ).read()
_check_text_dataset(__a ,__a )
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] ,)
def __UpperCAmelCase ( __a : Optional[int] ,__a : Optional[Any] ,__a : int ) -> List[str]:
"""simple docstring"""
_a : Dict = tmp_path / '''cache'''
_a : Optional[Any] = {'''text''': '''string'''}
_a : Optional[Any] = features.copy() if features else default_expected_features
_a : Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : List[str] = TextDatasetReader(__a ,features=__a ,cache_dir=__a ).read()
_check_text_dataset(__a ,__a )
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __a : str ,__a : int ,__a : List[str] ) -> List[Any]:
"""simple docstring"""
_a : Any = tmp_path / '''cache'''
_a : List[str] = {'''text''': '''string'''}
_a : str = TextDatasetReader(__a ,cache_dir=__a ,split=__a ).read()
_check_text_dataset(__a ,__a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' ,[str, list] )
def __UpperCAmelCase ( __a : int ,__a : int ,__a : str ) -> Optional[Any]:
"""simple docstring"""
if issubclass(__a ,__a ):
_a : Tuple = text_path
elif issubclass(__a ,__a ):
_a : Tuple = [text_path]
_a : Union[str, Any] = tmp_path / '''cache'''
_a : Any = {'''text''': '''string'''}
_a : int = TextDatasetReader(__a ,cache_dir=__a ).read()
_check_text_dataset(__a ,__a )
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
for split in splits:
_a : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def __UpperCAmelCase ( __a : int ,__a : int ,__a : Optional[Any] ) -> Any:
"""simple docstring"""
_a : List[str] = tmp_path / '''cache'''
_a : List[str] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : Union[str, Any] = TextDatasetReader({'''train''': text_path} ,cache_dir=__a ,keep_in_memory=__a ).read()
_check_text_datasetdict(__a ,__a )
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : List[str] ,__a : Optional[int] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
_a : Optional[Any] = {'''text''': '''string'''}
_a : str = features.copy() if features else default_expected_features
_a : Dict = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : Union[str, Any] = TextDatasetReader({'''train''': text_path} ,features=__a ,cache_dir=__a ).read()
_check_text_datasetdict(__a ,__a )
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __a : int ,__a : int ,__a : Dict ) -> Dict:
"""simple docstring"""
if split:
_a : Union[str, Any] = {split: text_path}
else:
_a : Any = '''train'''
_a : Any = {'''train''': text_path, '''test''': text_path}
_a : List[str] = tmp_path / '''cache'''
_a : Union[str, Any] = {'''text''': '''string'''}
_a : Tuple = TextDatasetReader(__a ,cache_dir=__a ).read()
_check_text_datasetdict(__a ,__a ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 14
|
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Helper function to map an activation-function name to an nn.Module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
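# Minimal usage sketch:
#
#     act = get_activation("gelu")  # returns nn.GELU()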
| 531
| 0
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=8_0, unit="iB", unit_scale=True, unit_divisor=1_0_2_4
        ) as loop:
            while True:
                buffer = source.read(8_1_9_2)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # Assumed cache location for checkpoints fetched by name (e.g. "tiny.en").
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path], root)), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
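# Added usage sketch (hedged): the script above is a CLI; a typical invocation
# with illustrative paths would be:
#
#   python convert_openai_whisper_to_hf.py \
#       --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en
#
# When `checkpoint_path` contains no ".pt", it is treated as a key into _MODELS
# and the checkpoint is fetched with `_download` before conversion.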
| 708
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 1_0
NUM_PERM = 2_5_6
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *,
        duplication_jaccard_threshold: float = 0.85, ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key, min_hash):
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_0_0_0_0), chunksize=1_0_0, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=1_0_0)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code_a: str, code_b: str) -> float:
    tokens_a = get_tokens(code_a)
    tokens_b = get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
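# Added usage sketch (hedged): end-to-end deduplication over a tiny in-memory
# Dataset. The column names match what the functions above expect ("content",
# "repo_name", "path"); the sample strings are illustrative only.
if __name__ == "__main__":
    toy = Dataset.from_dict(
        {
            "content": ["def f(x):\n    return x"] * 3 + ["print('unique')"],
            "repo_name": ["r0", "r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py", "d.py"],
        }
    )
    ds_dedup, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(ds_dedup), "rows kept out of", len(toy))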
| 664
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
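# Added note (hedged): with this lazy-module pattern, importing the package only
# pays for the files in `_import_structure` when an attribute is first touched;
# e.g. `from transformers import TimesformerModel` resolves through _LazyModule
# and triggers the torch-dependent import only at that point.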
| 636
|
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the smallest distance seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''simple docstring'''
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
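# Added usage sketch (hedged): nearest-neighbour lookup with the helpers above;
# the expected nearest vector is worked out by hand from euclidean distances.
if __name__ == "__main__":
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.9, 1.1]])
    # [1.0, 1.0] is nearest to [0.9, 1.1] (distance ~0.1414)
    print(similarity_search(dataset, value_array))
    print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 1.0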
| 636
| 1
|
"""simple docstring"""
import math
def is_prime ( number : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution ( ratio : float = 0.1 ) -> int:
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
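# Added usage note (hedged): each loop iteration adds one spiral layer; the three
# counted corners after side length j are j*j + (j+1), j*j + 2*(j+1) and
# j*j + 3*(j+1) (the fourth corner is the perfect square (j+2)**2, never prime).
if __name__ == "__main__":
    # Hand-tracing suggests 11: the prime ratio on the diagonals first drops
    # below 0.5 once the spiral reaches side length 11.
    print(solution(0.5))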
| 721
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_labels=False, vocab_size=9_9, hidden_size=1_6, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=3_2, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 2_0
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 2_0
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=2_4, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=3_2, decoder_ffn_dim=3_2, max_position_embeddings=4_8, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=1_4, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=4_8, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]], dtype=np.int64)
        summary = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )
                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 1_5, "max_length": 2_5}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 637
| 0
|
'''simple docstring'''
from __future__ import annotations
def shear_stress ( stress : float , tangential_force : float , area : float , ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
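if __name__ == "__main__":
    # Added usage sketch (hedged): exactly one of the three quantities must be 0;
    # that is the unknown the function solves for.
    print(shear_stress(stress=25.0, tangential_force=100.0, area=0.0))  # ('area', 4.0)
    print(shear_stress(stress=0.0, tangential_force=100.0, area=20.0))  # ('stress', 5.0)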
| 44
|
import numpy
class TwoHiddenLayerNeuralNetwork :
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        '''simple docstring'''
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        '''simple docstring'''
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ))
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        '''simple docstring'''
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T, 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output), )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T, numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output), self.second_hidden_layer_and_output_layer_weights.T, )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer), )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T, numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output), self.second_hidden_layer_and_output_layer_weights.T, )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer), self.first_hidden_layer_and_second_hidden_layer_weights.T, )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer), )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        '''simple docstring'''
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        '''simple docstring'''
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ), dtype=numpy.float64, )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=1_0, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
    example()
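# Added sanity check (hedged): sigmoid_derivative expects an already-activated
# value, so at sigmoid(x) = 0.5 the derivative must be 0.5 * (1 - 0.5) = 0.25.
assert sigmoid_derivative(numpy.float64(0.5)) == 0.25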
| 504
| 0
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    return_name = "generated"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def _sanitize_parameters(self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs, ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`")
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})")
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True
    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
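# Added usage sketch (hedged): how a task string reaches TranslationPipeline's
# _sanitize_parameters. The t5-small checkpoint is an illustrative choice.
if __name__ == "__main__":
    from transformers import pipeline
    # "translation_en_to_fr" splits on "_" into ["translation", "en", "to", "fr"],
    # so src_lang="en" and tgt_lang="fr" are inferred when not passed explicitly.
    translator = pipeline("translation_en_to_fr", model="t5-small")
    print(translator("How old are you?", max_length=40))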
| 716
|
'''simple docstring'''
MOD_ADLER = 65_521
def adler32(plain_text: str) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 276
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None):
    '''simple docstring'''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
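# Added usage sketch (hedged): fire exposes `convert` as a CLI; paths below are
# illustrative.
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Or from Python: convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")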
| 493
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            pass
    def load_image(_):
        '''simple docstring'''
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    """simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2, )
@slow
@require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
        pass
| 493
| 1
|
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than the length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
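# A quick illustrative check of the helpers above (example strings chosen
# here, not taken from the source):
#
#   z_function("abracadabra")        -> [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
#   find_pattern("abr", "abracadabra") -> 2   # "abr" occurs twice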
| 211
|
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 211
| 1
|
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
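# A minimal usage sketch of the classes above (illustrative values, not from
# the source): build a small weighted triangle and extract its minimum
# spanning tree; the heaviest edge should be dropped.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 1)
    g.add_edge("b", "c", 2)
    g.add_edge("a", "c", 3)
    mst = g.kruskal()
    print(mst.connections)  # {'a': {'b': 1}, 'b': {'a': 1, 'c': 2}, 'c': {'b': 2}}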
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder embeddings are skipped when counting parameters
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    # codebook weights are carried over as-is
    for key, value in codebook_state_dict.items():
        upgrade[key] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
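# Example invocation of this conversion script (hypothetical local paths,
# shown for illustration only):
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava_full.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf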
| 704
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(1_25.50, 0.05) = }")
| 170
| 0
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # use the double-angle identities to reflect the incoming gradient
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'{solution() = }')
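# A minimal sanity sketch (assumed values, shown for illustration): each point
# returned by next_point must land back on the ellipse y^2 + 4x^2 = 100.
#
#   x, y, m = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
#   assert isclose(y * y + 4 * x * x, 100.0)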
| 16
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # element-wise logistic function: 1 / (1 + e^(-x))
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
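# A small illustrative call (example input chosen here, not from the source):
#
#   sigmoid(np.array([-1.0, 0.0, 1.0]))
#   # -> array([0.26894142, 0.5, 0.73105858])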
| 497
| 0
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k: str) -> str:
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
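# Example invocation (hypothetical file names, shown for illustration only):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path ./blenderbot-model.bin \
#       --save_dir ./hf_blenderbot \
#       --hf_config_json ./blenderbot-3b-config.json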
| 706
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        # compute the expected (height, width) after resizing, for one image or a batch
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor(
_SCREAMING_SNAKE_CASE , ['semantic'] * len(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor(
_SCREAMING_SNAKE_CASE , ['semantic'] * len(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase = self.image_processing_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor(
_SCREAMING_SNAKE_CASE , ['semantic'] * len(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 514
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # NOTE: the concrete model this processor belongs to is not recoverable
    # from this snippet alone, so the class name here is a neutral placeholder.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 572
|
"""simple docstring"""
def solution() -> int:
    # product of the Pythagorean triplet (a, b, c) with a + b + c = 1000
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
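# Quick check (the known Project Euler 9 result, cited from memory; verify
# before relying on it): solution() should evaluate to 31875000, i.e.
# 200 * 375 * 425 with 200^2 + 375^2 = 425^2 and 200 + 375 + 425 = 1000.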
| 572
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    # check if number is a perfect square
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    # return the numerator and denominator of the sum of three fractions, in lowest form
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 175
|
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 175
| 1
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    # entropy of a pre-softmax logit tensor
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def _snake_case (self , __magic_name__ ):
if (type(__magic_name__ ) is float) or (type(__magic_name__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
lowerCamelCase__ : str = x
else:
lowerCamelCase__ : List[str] = x
def _snake_case (self , __magic_name__ ):
        loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _snake_case (self , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ):
lowerCamelCase__ : Optional[int] = ()
lowerCamelCase__ : Any = ()
lowerCamelCase__ : int = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
lowerCamelCase__ : int = all_hidden_states + (hidden_states,)
lowerCamelCase__ : int = layer_module(
__magic_name__ , __magic_name__ , head_mask[i] , __magic_name__ , __magic_name__ )
lowerCamelCase__ : str = layer_outputs[0]
if self.output_attentions:
lowerCamelCase__ : Tuple = all_attentions + (layer_outputs[1],)
lowerCamelCase__ : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
lowerCamelCase__ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowerCamelCase__ : Optional[int] = current_outputs + (all_attentions,)
lowerCamelCase__ : int = self.highway[i](__magic_name__ )
# logits, pooled_output
if not self.training:
lowerCamelCase__ : Any = highway_exit[0]
lowerCamelCase__ : Optional[int] = entropy(__magic_name__ )
lowerCamelCase__ : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowerCamelCase__ : List[str] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowerCamelCase__ : Union[str, Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__magic_name__ , i + 1 )
else:
lowerCamelCase__ : Any = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowerCamelCase__ : List[str] = all_hidden_states + (hidden_states,)
lowerCamelCase__ : Tuple = (hidden_states,)
if self.output_hidden_states:
lowerCamelCase__ : int = outputs + (all_hidden_states,)
if self.output_attentions:
lowerCamelCase__ : Optional[int] = outputs + (all_attentions,)
lowerCamelCase__ : Dict = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
def _snake_case (self ):
self.encoder.init_highway_pooler(self.pooler )
def _snake_case (self ):
return self.embeddings.word_embeddings
def _snake_case (self , __magic_name__ ):
lowerCamelCase__ : int = value
def _snake_case (self , __magic_name__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__magic_name__ )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def _snake_case (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
lowerCamelCase__ : Dict = input_ids.size()
elif inputs_embeds is not None:
lowerCamelCase__ : Optional[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
lowerCamelCase__ : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCamelCase__ : Any = torch.ones(__magic_name__ , device=__magic_name__ )
if encoder_attention_mask is None:
lowerCamelCase__ : Tuple = torch.ones(__magic_name__ , device=__magic_name__ )
if token_type_ids is None:
lowerCamelCase__ : List[Any] = torch.zeros(__magic_name__ , dtype=torch.long , device=__magic_name__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCamelCase__ : torch.Tensor = self.get_extended_attention_mask(__magic_name__ , __magic_name__ , __magic_name__ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowerCamelCase__ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowerCamelCase__ : List[str] = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCamelCase__ : List[str] = self.get_head_mask(__magic_name__ , self.config.num_hidden_layers )
lowerCamelCase__ : Union[str, Any] = self.embeddings(
input_ids=__magic_name__ , position_ids=__magic_name__ , token_type_ids=__magic_name__ , inputs_embeds=__magic_name__ )
lowerCamelCase__ : List[str] = self.encoder(
__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
lowerCamelCase__ : str = encoder_outputs[0]
lowerCamelCase__ : List[str] = self.pooler(__magic_name__ )
lowerCamelCase__ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    # a highway (early-exit) head on top of one intermediate BertLayer
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def _snake_case (self , __magic_name__ ):
# Pooler
lowerCamelCase__ : Dict = encoder_outputs[0]
lowerCamelCase__ : Any = self.pooler(__magic_name__ )
# "return" pooler_output
# BertModel
lowerCamelCase__ : str = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowerCamelCase__ : int = bmodel_output[1]
lowerCamelCase__ : Optional[int] = self.dropout(__magic_name__ )
lowerCamelCase__ : Optional[Any] = self.classifier(__magic_name__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. ''' , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def _snake_case (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=-1 , __magic_name__=False , ):
lowerCamelCase__ : Optional[int] = self.num_layers
try:
lowerCamelCase__ : int = self.bert(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCamelCase__ : List[str] = outputs[1]
lowerCamelCase__ : Tuple = self.dropout(__magic_name__ )
lowerCamelCase__ : Union[str, Any] = self.classifier(__magic_name__ )
lowerCamelCase__ : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCamelCase__ : Tuple = e.message
lowerCamelCase__ : str = e.exit_layer
lowerCamelCase__ : Optional[int] = outputs[0]
if not self.training:
lowerCamelCase__ : Tuple = entropy(__magic_name__ )
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Optional[int] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCamelCase__ : int = MSELoss()
lowerCamelCase__ : Optional[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase__ : Any = CrossEntropyLoss()
lowerCamelCase__ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCamelCase__ : Tuple = []
for highway_exit in outputs[-1]:
lowerCamelCase__ : Any = highway_exit[0]
if not self.training:
highway_logits_all.append(__magic_name__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCamelCase__ : Tuple = MSELoss()
lowerCamelCase__ : Dict = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase__ : Dict = CrossEntropyLoss()
lowerCamelCase__ : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__magic_name__ )
if train_highway:
lowerCamelCase__ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCamelCase__ : Union[str, Any] = (loss,) + outputs
if not self.training:
lowerCamelCase__ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCamelCase__ : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 157
|
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
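# A minimal usage sketch of the config above (illustrative values, not from
# the source):
#
#   config = BertGenerationConfig(hidden_size=512, num_hidden_layers=6)
#   assert config.model_type == "bert-generation"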
| 157
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """sew"""

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
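
# A minimal usage sketch (assuming the rewritten SEWConfig above): the property
# returns the total downsampling of the feature extractor, i.e. the product of
# all conv strides; for the default conv_stride this is 5 * 2**6 = 320.
# config = SEWConfig()
# assert config.inputs_to_logits_ratio == 320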
| 634
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp( self ):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text

    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 634
| 1
|
def solution(n: int = 600851475143 ) -> int:
    """simple docstring"""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime )
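
# Worked example (follows directly from the trial division above):
# 13195 = 5 * 7 * 13 * 29, so the largest prime factor is 29.
assert solution(13195) == 29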
if __name__ == "__main__":
print(F"""{solution() = }""")
| 62
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCamelCase = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
UpperCamelCase = dict(scheduler.config )
UpperCamelCase = 1
UpperCamelCase = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCamelCase = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
UpperCamelCase = dict(scheduler.config )
UpperCamelCase = True
UpperCamelCase = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE = "auto" ) -> Optional[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCamelCase = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = 7.5 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , **SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
UpperCamelCase = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCamelCase = self.segmentation_model(**SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCamelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCamelCase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
| 606
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : List[str] = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig ):
    """simple docstring"""

    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
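
# A minimal usage sketch (assuming the rewritten Swinv2Config above): with the
# default embed_dim=96 and four stages, the derived hidden_size is 96 * 2**3 = 768.
# config = Swinv2Config()
# assert config.hidden_size == 768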
| 356
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str , path: str , revision: Optional[str] = None ) -> str:
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision )
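
# A minimal usage sketch (the repo id and file name below are hypothetical
# examples; the call only builds a URL string, no network access is made):
if __name__ == "__main__":
    print(hf_hub_url("user/my-dataset", "data/train file.csv"))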
| 356
| 1
|
'''simple docstring'''
import os
import pytest
from attr import dataclass
lowerCAmelCase : Dict = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
    hyperparameters = {
        """task_name""": """mnli""",
        """per_device_train_batch_size""": 16,
        """per_device_eval_batch_size""": 16,
        """do_train""": True,
        """do_eval""": True,
        """do_predict""": True,
        """output_dir""": """/opt/ml/model""",
        """overwrite_output_dir""": True,
        """max_steps""": 5_00,
        """save_steps""": 55_00,
    }
    distributed_hyperparameters = {**hyperparameters, """max_steps""": 10_00}

    @property
    def metric_definitions(self ):
        '''simple docstring'''
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self ) -> str:
        '''simple docstring'''
        return F'''{self.framework}-transformers-test'''

    @property
    def test_path(self ) -> str:
        '''simple docstring'''
        return F'''./tests/sagemaker/scripts/{self.framework}'''

    @property
    def image_uri(self ) -> str:
        '''simple docstring'''
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope='class')
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 3
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : List[str] = image_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : Union[str, Any] = embeddings_size
__UpperCAmelCase : Dict = hidden_sizes
__UpperCAmelCase : Dict = depths
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Dict = len(UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values
def a_ ( self : Dict):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_)
__UpperCAmelCase : Dict = model(UpperCamelCase_)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_)
__UpperCAmelCase : str = model(UpperCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase_ = False
lowercase_ = False
lowercase_ = False
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = FlaxRegNetModelTester(self)
__UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self : Tuple):
"""simple docstring"""
return
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def a_ ( self : Union[str, Any]):
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def a_ ( self : Optional[int]):
"""simple docstring"""
pass
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Any = [*signature.parameters.keys()]
__UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]):
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : str = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1)
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_)
@jax.jit
def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]):
return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_)
with self.subTest("JIT Enabled"):
__UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
__UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple()
self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_))
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class a__ ( unittest.TestCase ):
@cached_property
def a_ ( self : Optional[int]):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : Dict = model(**UpperCamelCase_)
# verify the logits
__UpperCAmelCase : Dict = (1, 1000)
self.assertEqual(outputs.logits.shape , UpperCamelCase_)
__UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
| 77
| 0
|
'''simple docstring'''
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10 ) -> float:
    '''simple docstring'''
    # wrap the angle into the range [0, 360) degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count )
if __name__ == "__main__":
__import__('doctest').testmod()
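
# Quick sanity checks for the truncated series above (reference values
# sin(30°) = 0.5 and sin(90°) = 1.0; the tolerance is my own loose choice):
assert abs(maclaurin_sin(30) - 0.5) < 1e-6
assert abs(maclaurin_sin(90) - 1.0) < 1e-6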
| 717
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    '''simple docstring'''
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset


class MakeDuplicateClustersTest(TestCase ):
    def test_make_duplicate_clusters(self ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset(self ):
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_dedup ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True )
| 593
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        # pick two random positions and swap their contents
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
return data
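
# For comparison, a sketch of the classic Fisher-Yates variant, which walks i
# from the end and swaps with a random j in [0, i]; unlike repeated random pair
# swaps, this form provably yields a uniform permutation:
def classic_fisher_yates_shuffle(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive upper bound
        data[i], data[j] = data[j], data[i]
    return data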
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE__ : List[str] = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 298
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int ) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
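
# Worked example for year = 2023: metonic_cycle = 9, days_to_add = 15 and
# days_from_phm_to_sunday = 3, so the result is March 22 + 18 days = April 9, 2023.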
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
SCREAMING_SNAKE_CASE__ : int = "will be" if year > datetime.now().year else "was"
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 298
| 1
|
"""simple docstring"""
A = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
A = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
A = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 147
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method ):
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method

    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
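
# Typical use (a sketch; MyModel and encode are hypothetical names): decorating
# a method lets accelerate's CPU-offload hook move weights onto the execution
# device before the call runs.
#
# class MyModel(torch.nn.Module):
#     @apply_forward_hook
#     def encode(self, x):
#         ...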
| 147
| 1
|
from collections import Counter
from timeit import timeit
def UpperCamelCase ( snake_case__ : str = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def UpperCamelCase ( snake_case__ : str = "" ) -> bool:
if len(snake_case__ ) == 0:
return True
UpperCamelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCamelCase : dict[str, int] = {}
for character in lower_case_input_str:
UpperCamelCase : List[Any] = character_freq_dict.get(snake_case__ , 0 ) + 1
UpperCamelCase : int = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def UpperCamelCase ( snake_case__ : str = "" ) -> None:
print('\nFor string = ' , snake_case__ , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(snake_case__ ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(snake_case__ ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
__UpperCAmelCase = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
__UpperCAmelCase = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 40
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6'''))


def luhn_validation(credit_card_number: str) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_way = len(cc_number) - 2
    for i in range(half_way , -1 , -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1 , -1 , -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """simple docstring"""
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F'''{error_message} of its length.''')
        return False
    if not validate_initial_digits(credit_card_number):
        print(F'''{error_message} of its first two digits.''')
        return False
    if not luhn_validation(credit_card_number):
        print(F'''{error_message} it fails the Luhn check.''')
        return False
    print(F'''{credit_card_number} is a valid credit card number.''')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 11
| 0
|
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__( self , text: str , pattern: str ) ->None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )

    def match_in_pattern( self , char: str ) ->int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text( self , current_pos: int ) ->int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic( self ) ->list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
__lowercase :Any = "ABAABA"
__lowercase :int = "AB"
__lowercase :str = BoyerMooreSearch(text, pattern)
__lowercase :List[str] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
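
# Expected output for the demo above: pattern "AB" occurs in "ABAABA" at
# positions [0, 3].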
| 702
|
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""

    def __init__( self ) ->None:
        self.node_position = []

    def get_position( self , vertex ) ->int:
        return self.node_position[vertex]

    def set_position( self , vertex , pos ) ->None:
        self.node_position[vertex] = pos

    def top_to_bottom( self , heap , start , size , positions ) ->None:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )

    def bottom_to_top( self , val , index , heap , position ) ->None:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )

    def heapify( self , heap , positions ) ->None:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )

    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp


def prisms_algorithm(adjacency_list ):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
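
# A small worked example (my own toy graph): entering the edges "0 1 1",
# "1 2 2" and "0 2 4" connects three vertices, and prisms_algorithm returns the
# minimum spanning tree [(0, 1), (1, 2)].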
| 26
| 0
|
def hamming(n_element: int ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('''a should be a positive number''' )
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
    print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
    hamming_numbers = hamming(int(n))
    print('''-----------------------------------------------------''')
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print('''-----------------------------------------------------''')
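
# Expected output for n = 10: the first ten Hamming numbers are
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].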
| 39
|
import argparse
import json
from tqdm import tqdm
def UpperCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=UpperCAmelCase_ , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=UpperCAmelCase_ , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=UpperCAmelCase_ , help='''where to store parsed gold_data_path file''' , )
_lowercase : Dict = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowercase : str = json.load(UpperCAmelCase_ )
for dpr_record in tqdm(UpperCAmelCase_ ):
_lowercase : Optional[Any] = dpr_record['''question''']
_lowercase : Union[str, Any] = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(UpperCAmelCase_ ) + '''\n''' )
if __name__ == "__main__":
main()
| 322
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
_lowerCamelCase = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 701
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int ) -> int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n


def solution(limit: int = 10000 ) -> int:
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
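
# Worked example: sum_of_divisors(220) = 284 and sum_of_divisors(284) = 220, so
# the classic amicable pair is counted; solution(10000) yields 31626
# (Project Euler problem 21).
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220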
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 112
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 501
|
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float] , a: float , b: float ):
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''' )
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float ) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
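
# The demo call brackets the single real root of x**3 - 2*x - 5, so
# bisection(f, 1, 1000) converges to approximately 2.0945515.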
| 501
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig ):
    model_type = "mgp-str"

    def __init__( self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=5_0257 , num_wordpiece_labels=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.0_2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
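
# A minimal usage sketch (assuming the rewritten MgpstrConfig above):
# config = MgpstrConfig()
# config.max_token_length  # 27, the fixed decoding length used by MGP-STR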
| 293
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
UpperCamelCase : Union[str, Any] = "__DUMMY_TRANSFORMERS_USER__"
UpperCamelCase : List[Any] = "Dummy User"
UpperCamelCase : List[Any] = "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"
UpperCamelCase : Any = "https://hub-ci.huggingface.co"
UpperCamelCase : str = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
UpperCamelCase : Optional[Any] = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
UpperCamelCase : Optional[int] = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def A ( snake_case :Optional[Any] ) -> Union[str, Any]:
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , snake_case )
@pytest.fixture
def A ( snake_case :str ) -> List[Any]:
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , snake_case )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , snake_case )
@pytest.fixture
def A ( snake_case :Optional[int] ) -> Dict:
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , snake_case )
@pytest.fixture
def A ( snake_case :List[Any] , snake_case :Tuple ) -> Tuple:
HfFolder.save_token(snake_case )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def A ( ) -> List[Any]:
return HfApi(endpoint=snake_case )
@pytest.fixture(scope='session' )
def A ( snake_case :HfApi ) -> List[Any]:
__UpperCamelCase = HfFolder.get_token()
HfFolder.save_token(snake_case )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(snake_case )
@pytest.fixture
def A ( snake_case :str ) -> str:
def _cleanup_repo(snake_case :Union[str, Any] ):
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def A ( snake_case :List[str] ) -> Any:
@contextmanager
def _temporary_repo(snake_case :Tuple ):
try:
yield repo_id
finally:
cleanup_repo(snake_case )
return _temporary_repo
@pytest.fixture(scope='session' )
def A ( snake_case :HfApi , snake_case :Dict , snake_case :Dict ) -> List[str]:
__UpperCamelCase = f'repo_txt_data-{int(time.time() * 10e3 )}'
__UpperCamelCase = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case , token=snake_case , repo_type='dataset' , private=snake_case )
hf_api.upload_file(
token=snake_case , path_or_fileobj=str(snake_case ) , path_in_repo='data/text_data.txt' , repo_id=snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case :Optional[int] , snake_case :str , snake_case :Tuple ) -> Any:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def A ( snake_case :HfApi , snake_case :List[Any] , snake_case :str ) -> Optional[int]:
__UpperCamelCase = f'repo_zipped_txt_data-{int(time.time() * 10e3 )}'
__UpperCamelCase = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case , token=snake_case , repo_type='dataset' , private=snake_case )
hf_api.upload_file(
token=snake_case , path_or_fileobj=str(snake_case ) , path_in_repo='data.zip' , repo_id=snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case :int , snake_case :Optional[int] , snake_case :Optional[int] ) -> str:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def A ( snake_case :HfApi , snake_case :List[str] , snake_case :Any ) -> List[Any]:
__UpperCamelCase = f'repo_zipped_img_data-{int(time.time() * 10e3 )}'
__UpperCamelCase = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(snake_case , token=snake_case , repo_type='dataset' , private=snake_case )
hf_api.upload_file(
token=snake_case , path_or_fileobj=str(snake_case ) , path_in_repo='data.zip' , repo_id=snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case , token=snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case :List[str] , snake_case :Optional[Any] , snake_case :Optional[int] ) -> Optional[int]:
return hf_private_dataset_repo_zipped_img_data_
| 293
| 1
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
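# Hedged usage sketch (not in the original script): the simulation core can be
# driven headlessly, without the matplotlib loop above.
#
# board = create_canvas(5)
# seed(board)
# board = run(board)  # advance one Game of Life generation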
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
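# Hedged usage sketch (assumes a local "corpus.txt"; this mirrors what the
# public `load_dataset("text", ...)` path does internally):
#
# reader = TextDatasetReader("corpus.txt", split=NamedSplit("train"))
# ds = reader.read()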
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
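# Hedged usage sketch: the same generation flow outside the test harness
# (downloads a 2.7B-parameter checkpoint, so it is left commented out).
#
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
# print(tokenizer.batch_decode(model.generate(ids, max_length=50), skip_special_tokens=True))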
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of their occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
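# Hedged usage sketch of the automaton (expected output worked out by hand;
# values are the start indices of each keyword match):
#
# automaton = Automaton(["what", "hat", "ver", "er"])
# print(automaton.search_in("whatever, err ... , wherever"))
# # {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}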
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids,
                    attention_mask=input_mask,
                    token_type_ids=segment_ids,
                    label_ids=label_ids,
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
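# Hedged usage sketch (the task subclass, data_dir, tokenizer and label list
# here are illustrative, not defined in this module):
#
# dataset = TokenClassificationDataset(
#     token_classification_task=my_ner_task,  # a TokenClassificationTask subclass instance
#     data_dir="./data",
#     tokenizer=tokenizer,
#     labels=["O", "B-PER", "I-PER"],
#     model_type="bert",
#     max_seq_length=128,
#     mode=Split.train,
# )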
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path, record_checksum=True):
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
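# Hedged usage sketch of verify_checksums (the dicts are illustrative; a real
# caller records {"num_bytes", "checksum"} per downloaded URL):
#
# expected = {"https://example.com/a.txt": {"num_bytes": 4, "checksum": "abc"}}
# recorded = {"https://example.com/a.txt": {"num_bytes": 4, "checksum": "abc"}}
# verify_checksums(expected, recorded, verification_name="dataset source files")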
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
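# Hedged usage sketch (standard PretrainedConfig-style instantiation; nothing
# here beyond the defaults defined above):
#
# config = MarkupLMConfig()
# config.max_depth  # -> 50
# config = MarkupLMConfig(hidden_size=1024, num_attention_heads=16)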
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    # Remove the remote filesystem prefix (e.g. "s3://") if there is one.
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    # A filesystem counts as remote if it is set and is not the local "file" protocol.
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    # Clear references to the event loop and thread so that fsspec can be used
    # again after a fork (e.g. inside a training loop worker).
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
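# Hedged usage sketch of the path helper above:
#
# extract_path_from_uri("s3://my-bucket/datasets/train")  # -> "my-bucket/datasets/train"
# extract_path_from_uri("/local/path")                    # -> "/local/path"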
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
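# Hedged spot checks (worked out by hand):
#
# greatest_common_divisor(24, 40)  # -> 8
# gcd_by_iterative(24, 40)         # -> 8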
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    # O(n^2): for each element, scan the rest of the list for the next greater one.
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    # Same O(n^2) idea, but using enumerate() and slicing for the loops.
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    # O(n): scan from the right, keeping a stack of candidate "next greater" values.
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
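# Hedged non-interactive sketch (bypasses the input() prompts above):
#
# INF = float("inf")
# graph = [[0.0, 2, INF], [1, 0.0, INF], [INF, INF, 0.0]]
# dist, _ = floyd_warshall(graph, 3)  # prints and returns the distance matrix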
from __future__ import annotations
import requests
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: returns all primes below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    # Count semiprimes p*q < max_number with a two-pointer sweep over the primes.
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(F"{solution() = }")
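# Hedged spot check (counted by hand): numbers below 30 that are a product of
# exactly two primes are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
#
# solution(30)  # -> 10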
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 301
| 0
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__snake_case : int ={
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__snake_case : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
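# Illustrative note (not part of the module): `_LazyModule` replaces this module
# in `sys.modules`, so a statement such as
#   from transformers.onnx import OnnxConfig
# only imports the `.config` submodule on first attribute access.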
| 647
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: maximal value for capacity `w` from `n` items with values `vl` and weights `wt`."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
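# Illustrative check (hand-computed, not from the original file):
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# sorts by value/weight ratio (6, 5, 4), takes the first two items whole
# (acc = [10, 30, 60], k = 2) and 20/30 of the third:
# 60 + 100 + 20 * 120 / 30 == 240.0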
if __name__ == "__main__":
import doctest
doctest.testmod()
| 106
| 0
|
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
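# Illustrative checks (hand-computed, not from the original file), with
# number = 0b1101 (13):
#   set_bit(0b1101, 1)    == 0b1111 (15)
#   clear_bit(0b1101, 2)  == 0b1001 (9)
#   flip_bit(0b1101, 1)   == 0b1111 (15)
#   is_bit_set(0b1101, 0) is True
#   get_bit(0b1101, 1)    == 0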
| 461
|
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f'### Benchmark: {benchmark_file_name}')
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f' {new_val:f}' if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f' / {old_val:f}' if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f' ({dif_val:f})' if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 461
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    # Placeholder raised at use; the real scheduler needs `torch` and `torchsde`.
    _backends = ["torch", "torchsde"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
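# Illustrative note (not part of the file): with `DummyObject` as metaclass,
# even class-level access such as `DPMSolverSDEScheduler.from_pretrained(...)`
# routes through `requires_backends`, which raises an ImportError explaining
# that `torch` and `torchsde` must be installed first.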
| 77
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
if attention_mask is None:
lowerCamelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCamelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCamelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
lowerCamelCase_ = 2_0
lowerCamelCase_ = model_class_name(__UpperCamelCase )
lowerCamelCase_ = model.encode(inputs_dict["""input_ids"""] )
lowerCamelCase_ , lowerCamelCase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCamelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = model.decode(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
lowerCamelCase_ = 2_0
lowerCamelCase_ = model_class_name(__UpperCamelCase )
lowerCamelCase_ = model.encode(inputs_dict["""input_ids"""] )
lowerCamelCase_ , lowerCamelCase_ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCamelCase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCamelCase_ = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowerCamelCase_ = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
lowerCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def lowercase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[str] ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = model_class(__UpperCamelCase )
@jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
with self.subTest("""JIT Enabled""" ):
lowerCamelCase_ = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase_ = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : Any ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ = model_class(__UpperCamelCase )
lowerCamelCase_ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
lowerCamelCase_ = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase_ = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase_ = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase__ ( self : str ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase_ = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
| 272
| 0
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 450
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast MBart tokenizer, backed by the HuggingFace tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
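# Illustrative usage sketch (checkpoint name taken from the map above):
#
#   tok = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   ids = tok("Hello")["input_ids"]
#   # per set_src_lang_special_tokens: [...text tokens..., eos_token_id, en_XX id]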
| 450
| 1
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def __a ( self : List[Any] ):
lowerCamelCase_ , lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] = model_class(lowerCamelCase )
lowerCamelCase_ : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : int = [*signature.parameters.keys()]
lowerCamelCase_ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __a ( self : Dict ):
lowerCamelCase_ , lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : Dict = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = model_class(lowerCamelCase )
@jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
with self.subTest('JIT Enabled' ):
lowerCamelCase_ : List[str] = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase_ : List[Any] = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def __a ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ : Optional[int] = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
lowerCamelCase_ : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k')
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='np').pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]])
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
@slow
def __a ( self : Tuple ):
lowerCamelCase_ : Union[str, Any] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
lowerCamelCase_ : List[str] = self.default_image_processor
lowerCamelCase_ : Union[str, Any] = prepare_img()
lowerCamelCase_ : List[Any] = image_processor(images=lowerCamelCase , return_tensors='np' )
# forward pass
lowerCamelCase_ : Dict = model(**lowerCamelCase )
lowerCamelCase_ : List[str] = outputs.logits
# verify the logits
lowerCamelCase_ : List[Any] = (1, 10_00)
self.assertEqual(logits.shape , lowerCamelCase )
lowerCamelCase_ : List[str] = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
lowerCamelCase_ : Optional[int] = 2_81
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase )
@slow
def __a ( self : Optional[Any] ):
lowerCamelCase_ : List[str] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
lowerCamelCase_ : List[Any] = self.default_image_processor
lowerCamelCase_ : Optional[Any] = prepare_img()
lowerCamelCase_ : Tuple = image_processor(images=lowerCamelCase , return_tensors='np' )
# forward pass
lowerCamelCase_ : Union[str, Any] = model(**lowerCamelCase )
lowerCamelCase_ : Dict = outputs.logits
# verify the logits
lowerCamelCase_ : Optional[int] = (1, 2_18_41)
self.assertEqual(logits.shape , lowerCamelCase )
lowerCamelCase_ : Tuple = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
lowerCamelCase_ : Optional[int] = 23_96
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase )
| 364
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def __a ( self : Optional[int] ):
lowerCamelCase_ : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ : Union[str, Any] = 'lower newer'
lowerCamelCase_ : List[str] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase_ : Dict = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = tokens + [tokenizer.unk_token]
lowerCamelCase_ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def __a ( self : str ):
if not self.test_rust_tokenizer:
return
lowerCamelCase_ : Dict = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
lowerCamelCase_ : int = 'lower newer'
# Testing tokenization
lowerCamelCase_ : List[str] = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase )
lowerCamelCase_ : List[Any] = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing conversion to ids without special tokens
lowerCamelCase_ : List[str] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
lowerCamelCase_ : str = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing conversion to ids with special tokens
lowerCamelCase_ : Dict = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
lowerCamelCase_ : Tuple = tokenizer.encode(lowerCamelCase , add_prefix_space=lowerCamelCase )
lowerCamelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing the unknown token
lowerCamelCase_ : Optional[int] = tokens + [rust_tokenizer.unk_token]
lowerCamelCase_ : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
    def test_pretokenized_inputs(self, *args, **kwargs):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __a ( self : Optional[int] , lowerCamelCase : Any=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
# Simple input
lowerCamelCase_ : Any = 'This is a simple input'
lowerCamelCase_ : Optional[int] = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ : Optional[Any] = ('This is a simple input', 'This is a pair')
lowerCamelCase_ : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding='max_length' )
# Simple input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='max_length' )
# Simple input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='max_length' , )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding='max_length' )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='max_length' )
# Pair input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='max_length' , )
def __a ( self : Optional[Any] ):
lowerCamelCase_ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
lowerCamelCase_ : Tuple = 'This is a simple input'
lowerCamelCase_ : Tuple = ['This is a simple input looooooooong', 'This is a simple input']
lowerCamelCase_ : Any = ('This is a simple input', 'This is a pair')
lowerCamelCase_ : Tuple = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowerCamelCase_ : str = tokenizer.pad_token_id
lowerCamelCase_ : int = tokenizer(lowerCamelCase , padding='max_length' , max_length=30 , return_tensors='np' )
lowerCamelCase_ : List[Any] = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors='np' )
lowerCamelCase_ : Tuple = tokenizer(*lowerCamelCase , padding='max_length' , max_length=60 , return_tensors='np' )
lowerCamelCase_ : Union[str, Any] = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __a ( self : str ):
lowerCamelCase_ : List[str] = '$$$'
lowerCamelCase_ : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase , add_bos_token=lowerCamelCase )
lowerCamelCase_ : Optional[Any] = 'This is a simple input'
lowerCamelCase_ : Optional[Any] = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase_ : List[str] = tokenizer.bos_token_id
lowerCamelCase_ : Optional[Any] = tokenizer(lowerCamelCase )
lowerCamelCase_ : Optional[int] = tokenizer(lowerCamelCase )
self.assertEqual(out_s.input_ids[0] , lowerCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase_ : str = tokenizer.decode(out_s.input_ids )
lowerCamelCase_ : Any = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __a ( self : List[str] ):
lowerCamelCase_ : Optional[int] = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
lowerCamelCase_ : Tuple = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
lowerCamelCase_ : Any = '\nif len_a > len_b: result = a\nelse: result = b'
lowerCamelCase_ : int = tokenizer.encode(lowerCamelCase )
lowerCamelCase_ : Optional[int] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
lowerCamelCase_ : str = tokenizer.decode(lowerCamelCase , truncate_before_pattern=lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def __a ( self : Optional[Any] ):
pass
| 364
| 1
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """RegNet does not use input_ids/attention_mask, so several common tests are overridden below."""
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def __snake_case ( self : Tuple ) -> Any:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __snake_case ( self : str ) -> Tuple:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __snake_case ( self : Optional[Any] ) -> Optional[int]:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __snake_case ( self : str ) -> str:
pass
def __snake_case ( self : Tuple ) -> int:
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : int = model_class(_A )
__snake_case : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Dict = [*signature.parameters.keys()]
__snake_case : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __snake_case ( self : int ) -> Tuple:
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __snake_case ( self : Dict ) -> List[Any]:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__snake_case : str = layer_type
__snake_case : Dict = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Union[str, Any] = True
check_hidden_states_output(_A , _A , _A )
def __snake_case ( self : str ) -> List[str]:
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}'
                        ), )
            recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(_A )
__snake_case : List[Any] = self._prepare_for_class(_A , _A )
__snake_case : int = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A )
__snake_case : List[str] = self._prepare_for_class(_A , _A , return_labels=_A )
__snake_case : Union[str, Any] = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A )
__snake_case : List[Any] = self._prepare_for_class(_A , _A )
__snake_case : Optional[int] = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A , {"output_hidden_states": True} )
__snake_case : List[str] = self._prepare_for_class(_A , _A , return_labels=_A )
__snake_case : str = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A , {"output_hidden_states": True} )
def __snake_case ( self : str ) -> int:
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __snake_case ( self : Optional[Any] ) -> List[str]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = TFRegNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
@slow
def __snake_case ( self : int ) -> List[str]:
__snake_case : int = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__snake_case : Optional[int] = self.default_image_processor
__snake_case : Optional[int] = prepare_img()
__snake_case : Any = image_processor(images=_A , return_tensors="tf" )
# forward pass
__snake_case : List[str] = model(**_A , training=_A )
# verify the logits
__snake_case : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
__snake_case : Tuple = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
| 714
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`, the projection weights sit at
            # indices 0 and 2; map them to linear1/linear2 on the transformers side.
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case : List[str] = value
__snake_case : Optional[int] = mixed_qkv.size(0 ) // 3
__snake_case : List[str] = mixed_qkv[:qkv_dim]
__snake_case : int = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : str = mixed_qkv[qkv_dim * 2 :]
__snake_case : int = query_layer
__snake_case : Tuple = key_layer
__snake_case : List[str] = value_layer
else:
__snake_case : Tuple = value
return model_state_dict
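
# A minimal, hedged sketch of the qkv split performed above, on a dummy fused
# weight of shape (3 * dim, dim); torch.chunk is equivalent to the three slices:
#
#   import torch
#   fused = torch.randn(3 * 8, 8)
#   q, k, v = fused.chunk(3, dim=0)
#   assert q.shape == k.shape == v.shape == (8, 8)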
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
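
# Example invocation (paths are illustrative placeholders, not shipped files):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion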
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
def _lowercase ( self : Optional[int] , __A : List[str] , __A : Dict = "pytorch" ):
snake_case__ : Optional[Any] = self.get_auto_remove_tmp_dir()
snake_case__ : List[Any] = os.path.join(__A , "output" )
snake_case__ : str = os.path.join(__A , "data" )
self._create_dummy_data(data_dir=__A )
snake_case__ : Any = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Returns the least row length n at which the fill-count function first
    exceeds one million, for the given minimum red-block length."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # note: like the upstream test, the constructor ignores most arguments
        # and hardcodes the values used by the common test-suite
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs_dict = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every ordered combination of words from word_bank that spells target."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
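
# Expected behaviour for a small, easy-to-check input (illustrative comment):
#   all_construct("abc", ["a", "b", "c", "ab"]) -> [["ab", "c"], ["a", "b", "c"]]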
def and_gate(input_1: int, input_2: int) -> int:
    """AND gate: returns 1 only when both inputs are 1.

    (The original signature repeated the same parameter name, a SyntaxError.)
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively checks the two-input truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
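
    # A hedged extension sketch (not part of the original module): other basic
    # gates compose from and_gate in the usual way, e.g. NAND as NOT(AND):
    print(int(not and_gate(1, 1)))  # NAND(1, 1) == 0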
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # the original lambda captured the wrong name (`lambda _A: i.created_at`)
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
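
# Run locally with a personal access token that has repo scope (illustrative
# invocation only; the script path depends on where this file lives):
#   GITHUB_TOKEN=<token> python scripts/stale.py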
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config(self):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_SCREAMING_SNAKE_CASE , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_SCREAMING_SNAKE_CASE , loss_ignore_index=255 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _snake_case ( self )-> str:
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _snake_case ( self )-> str:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _snake_case ( self )-> Optional[Any]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _snake_case ( self )-> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _snake_case ( self )-> List[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self )-> str:
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _snake_case ( self )-> Dict:
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    """One party of a Diffie-Hellman key exchange over a MODP group."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
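
    # A hedged end-to-end sketch: two parties derive identical shared secrets.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )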
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
def __init__( self : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Optional[Any]=None , )-> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_multiple_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = weight_tying
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Dict:
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : Any )-> List[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase = True
return config, input_ids, input_mask, token_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] )-> List[str]:
"""simple docstring"""
UpperCamelCase = GPTNeoXJapaneseModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
UpperCamelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] )-> List[str]:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = GPTNeoXJapaneseModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] )-> Any:
"""simple docstring"""
UpperCamelCase = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] )-> Any:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
# first forward pass
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
UpperCamelCase = output_from_no_past["hidden_states"][0]
UpperCamelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , )["hidden_states"][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3 ) )
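
        # The slice comparison above exercises the KV cache: a forward pass that
        # reuses past_key_values for the appended tokens must match a single full
        # pass over the concatenated sequence, up to a small numerical tolerance.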
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Any:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : str )-> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> List[Any]:
"""simple docstring"""
# This regression test was failing with PyTorch < 1.3
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase = None
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase_ )
@slow
    def test_generation(self):
UpperCamelCase = "abeja/gpt-neox-japanese-2.7b"
UpperCamelCase = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
UpperCamelCase = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
UpperCamelCase = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
UpperCamelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCAmelCase_ )
UpperCamelCase = []
for prompt in prompts:
UpperCamelCase = tokenizer(UpperCAmelCase_ , return_tensors="pt" ).input_ids
UpperCamelCase = model.generate(UpperCAmelCase_ , max_length=50 )
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
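        # The past-key-values slice comparison earlier in this file pins down the
        # cache contract: decoding incrementally with past_key_values must
        # reproduce the logits of a full forward pass. A standalone sketch of the
        # same invariant (the "gpt2" checkpoint and token ids are illustrative
        # assumptions, not part of this test suite):
        #
        #   import torch
        #   from transformers import AutoModelForCausalLM
        #   model = AutoModelForCausalLM.from_pretrained("gpt2").eval()
        #   ids = torch.tensor([[464, 3290, 318, 922]])
        #   with torch.no_grad():
        #       full = model(ids).logits[:, -1]
        #       cache = model(ids[:, :-1], use_cache=True).past_key_values
        #       step = model(ids[:, -1:], past_key_values=cache).logits[:, -1]
        #   assert torch.allclose(full, step, atol=1e-3)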
| 554
| 1
|
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
_lowerCamelCase : Tuple = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
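# Quick shape check for pre_process_datasets (illustrative toy token ids, not
# from the ROCStories data):
#   enc = [[([10, 11], [12], [13], 0)]]                 # 1 dataset, 1 example
#   ids, mc_tok, lm, mc = pre_process_datasets(enc, 7, 5, 1, 2, 3)[0]
#   ids.shape == (1, 2, 7); mc_tok.shape == (1, 2); int(mc[0]) == 0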
def main():
    '''simple docstring'''
_UpperCamelCase :Optional[int] =argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=__a , default="""""" )
parser.add_argument("""--eval_dataset""" , type=__a , default="""""" )
parser.add_argument("""--seed""" , type=__a , default=42 )
parser.add_argument("""--num_train_epochs""" , type=__a , default=3 )
parser.add_argument("""--train_batch_size""" , type=__a , default=8 )
parser.add_argument("""--eval_batch_size""" , type=__a , default=16 )
parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=__a , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=__a , default=1 )
    parser.add_argument(
        """--max_steps""" , default=-1 , type=__a , help=(
            """If > 0: set total number of training steps to perform. Overrides num_train_epochs."""
        ) , )
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of update steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=__a , default=6.2_5e-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=__a , default=0.01 )
parser.add_argument("""--lm_coef""" , type=__a , default=0.9 )
parser.add_argument("""--n_valid""" , type=__a , default=3_74 )
parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
_UpperCamelCase :Dict =parser.parse_args()
print(__a )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_UpperCamelCase :str =torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
_UpperCamelCase :Any =torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(__a , __a ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_UpperCamelCase :List[Any] =["""_start_""", """_delimiter_""", """_classify_"""]
_UpperCamelCase :Optional[Any] =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
_UpperCamelCase :List[Any] =tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase :List[Any] =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""" )
_UpperCamelCase :str =load_rocstories_dataset(args.train_dataset )
_UpperCamelCase :Union[str, Any] =load_rocstories_dataset(args.eval_dataset )
_UpperCamelCase :Optional[int] =(train_dataset, eval_dataset)
_UpperCamelCase :List[Any] =tokenize_and_encode(__a )
# Compute the max input length for the Transformer
_UpperCamelCase :int =model.config.n_positions // 2 - 2
_UpperCamelCase :Tuple =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_UpperCamelCase :List[Any] =min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_UpperCamelCase :Dict =pre_process_datasets(__a , __a , __a , *__a )
_UpperCamelCase :Optional[int] =tensor_datasets[0], tensor_datasets[1]
_UpperCamelCase :Any =TensorDataset(*__a )
_UpperCamelCase :str =RandomSampler(__a )
_UpperCamelCase :str =DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
_UpperCamelCase :Optional[Any] =TensorDataset(*__a )
_UpperCamelCase :List[str] =SequentialSampler(__a )
_UpperCamelCase :List[Any] =DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_UpperCamelCase :Any =args.max_steps
_UpperCamelCase :Tuple =args.max_steps // (len(__a ) // args.gradient_accumulation_steps) + 1
else:
_UpperCamelCase :Tuple =len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs
_UpperCamelCase :Any =list(model.named_parameters() )
_UpperCamelCase :Optional[int] =["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
_UpperCamelCase :List[Any] =[
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
_UpperCamelCase :int =AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon )
_UpperCamelCase :Union[str, Any] =get_linear_schedule_with_warmup(
__a , num_warmup_steps=args.warmup_steps , num_training_steps=__a )
if args.do_train:
_UpperCamelCase :str =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
_UpperCamelCase :int =0
_UpperCamelCase :str =0
_UpperCamelCase :Optional[int] =tqdm(__a , desc="""Training""" )
for step, batch in enumerate(__a ):
_UpperCamelCase :Union[str, Any] =tuple(t.to(__a ) for t in batch )
_UpperCamelCase :Dict =batch
_UpperCamelCase :int =model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
_UpperCamelCase :Dict =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_UpperCamelCase :List[str] =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_UpperCamelCase :int ="""Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_UpperCamelCase :Dict =model.module if hasattr(__a , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_UpperCamelCase :Optional[Any] =os.path.join(args.output_dir , __a )
_UpperCamelCase :Optional[int] =os.path.join(args.output_dir , __a )
torch.save(model_to_save.state_dict() , __a )
model_to_save.config.to_json_file(__a )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_UpperCamelCase :Tuple =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_UpperCamelCase :Any =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__a )
if args.do_eval:
model.eval()
_UpperCamelCase :Any =0, 0
_UpperCamelCase :Tuple =0, 0
for batch in tqdm(__a , desc="""Evaluating""" ):
_UpperCamelCase :str =tuple(t.to(__a ) for t in batch )
_UpperCamelCase :Optional[Any] =batch
with torch.no_grad():
_UpperCamelCase :int =model(
__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
_UpperCamelCase :List[Any] =mc_logits.detach().cpu().numpy()
_UpperCamelCase :str =mc_labels.to("""cpu""" ).numpy()
_UpperCamelCase :Optional[Any] =accuracy(__a , __a )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_UpperCamelCase :Tuple =eval_loss / nb_eval_steps
_UpperCamelCase :Optional[Any] =eval_accuracy / nb_eval_examples
_UpperCamelCase :List[str] =tr_loss / nb_tr_steps if args.do_train else None
_UpperCamelCase :str ={"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
_UpperCamelCase :Optional[int] =os.path.join(args.output_dir , """eval_results.txt""" )
with open(__a , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , __a , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 721
|
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    """Convert a positive decimal number to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically num //= 8, discarding any remainder
    # The int() call removes the trailing '.0' that math.pow/float math introduce.
    return f"0o{int(octal)}"
def main() -> None:
    """Print octal equivalents of common decimal values."""
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(2_16 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(5_12 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
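    # Cross-check against Python's built-in oct() (added for illustration; both
    # should produce the same "0o..." strings for positive integers):
    for value in (2, 8, 65, 216, 512):
        assert decimal_to_octal(value) == oct(value)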
| 512
| 0
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __get__( self , lowerCAmelCase__ , lowerCAmelCase__=None):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""")
__SCREAMING_SNAKE_CASE = """__cached_""" + self.fget.__name__
__SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
if cached is None:
__SCREAMING_SNAKE_CASE = self.fget(lowerCAmelCase__)
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}" )
def _lowerCAmelCase ( UpperCamelCase_ ):
if is_torch_fx_proxy(UpperCamelCase_ ):
return True
if is_torch_available():
import torch
if isinstance(UpperCamelCase_ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCamelCase_ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCamelCase_ , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCamelCase_ , np.ndarray )
def _lowerCAmelCase ( UpperCamelCase_ ):
return isinstance(UpperCamelCase_ , np.ndarray )
def _lowerCAmelCase ( UpperCamelCase_ ):
return _is_numpy(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
import torch
return isinstance(UpperCamelCase_ , torch.Tensor )
def _lowerCAmelCase ( UpperCamelCase_ ):
return False if not is_torch_available() else _is_torch(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
import torch
return isinstance(UpperCamelCase_ , torch.device )
def _lowerCAmelCase ( UpperCamelCase_ ):
return False if not is_torch_available() else _is_torch_device(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if hasattr(UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = getattr(UpperCamelCase_ , UpperCamelCase_ )
else:
return False
return isinstance(UpperCamelCase_ , torch.dtype )
def _lowerCAmelCase ( UpperCamelCase_ ):
return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
import tensorflow as tf
return isinstance(UpperCamelCase_ , tf.Tensor )
def _lowerCAmelCase ( UpperCamelCase_ ):
return False if not is_tf_available() else _is_tensorflow(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCamelCase_ , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(UpperCamelCase_ )
return type(UpperCamelCase_ ) == tf.Tensor
def _lowerCAmelCase ( UpperCamelCase_ ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCamelCase_ , jnp.ndarray )
def _lowerCAmelCase ( UpperCamelCase_ ):
return False if not is_flax_available() else _is_jax(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , (dict, UserDict) ):
return {k: to_py_obj(UpperCamelCase_ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return [to_py_obj(UpperCamelCase_ ) for o in obj]
elif is_tf_tensor(UpperCamelCase_ ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCamelCase_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCamelCase_ ):
return np.asarray(UpperCamelCase_ ).tolist()
elif isinstance(UpperCamelCase_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _lowerCAmelCase ( UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , (dict, UserDict) ):
return {k: to_numpy(UpperCamelCase_ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return np.array(UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
return obj.numpy()
elif is_torch_tensor(UpperCamelCase_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCamelCase_ ):
return np.asarray(UpperCamelCase_ )
else:
return obj
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = fields(self)
# Safety and consistency checks
if not len(lowerCAmelCase__):
raise ValueError(f"{self.__class__.__name__} has no fields.")
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
__SCREAMING_SNAKE_CASE = getattr(self , class_fields[0].name)
__SCREAMING_SNAKE_CASE = all(getattr(self , field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(lowerCAmelCase__):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = first_field.items()
__SCREAMING_SNAKE_CASE = True
else:
try:
__SCREAMING_SNAKE_CASE = iter(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = True
except TypeError:
__SCREAMING_SNAKE_CASE = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowerCAmelCase__):
if (
not isinstance(lowerCAmelCase__ , (list, tuple))
or not len(lowerCAmelCase__) == 2
or not isinstance(element[0] , lowerCAmelCase__)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__SCREAMING_SNAKE_CASE = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
break
setattr(self , element[0] , element[1])
if element[1] is not None:
__SCREAMING_SNAKE_CASE = element[1]
elif first_field is not None:
__SCREAMING_SNAKE_CASE = first_field
else:
for field in class_fields:
__SCREAMING_SNAKE_CASE = getattr(self , field.name)
if v is not None:
__SCREAMING_SNAKE_CASE = v
def __delitem__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__( self , lowerCAmelCase__):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = dict(self.items())
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , lowerCAmelCase__ , lowerCAmelCase__):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowerCAmelCase__ , lowerCAmelCase__)
super().__setattr__(lowerCAmelCase__ , lowerCAmelCase__)
def __setitem__( self , lowerCAmelCase__ , lowerCAmelCase__):
# Will raise a KeyException if needed
super().__setitem__(lowerCAmelCase__ , lowerCAmelCase__)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowerCAmelCase__ , lowerCAmelCase__)
def snake_case_ ( self):
return tuple(self[k] for k in self.keys())
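# A minimal sketch of how the output class above is meant to be subclassed (field
# names are illustrative; in transformers this base class is `ModelOutput`):
#
#   from dataclasses import dataclass
#   from typing import Optional
#   import torch
#
#   @dataclass
#   class ToyOutput(ModelOutput):
#       logits: torch.Tensor = None
#       loss: Optional[torch.Tensor] = None
#
#   out = ToyOutput(logits=torch.ones(2))
#   out.logits is out["logits"]      # attribute and key access agree
#   out.to_tuple() == (out.logits,)  # unset (None) fields are skipped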
class SCREAMING_SNAKE_CASE_ ( __a , __a ):
"""simple docstring"""
@classmethod
def snake_case_ ( cls , lowerCAmelCase__):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : int = '''longest'''
__lowercase : Any = '''max_length'''
__lowercase : List[Any] = '''do_not_pad'''
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : List[str] = '''pt'''
__lowercase : str = '''tf'''
__lowercase : str = '''np'''
__lowercase : Optional[int] = '''jax'''
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = context_managers
__SCREAMING_SNAKE_CASE = ExitStack()
def __enter__( self):
for context_manager in self.context_managers:
self.stack.enter_context(lowerCAmelCase__)
def __exit__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
self.stack.__exit__(*lowerCAmelCase__ , **lowerCAmelCase__)
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = infer_framework(UpperCamelCase_ )
if framework == "tf":
__SCREAMING_SNAKE_CASE = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__SCREAMING_SNAKE_CASE = inspect.signature(model_class.forward ) # PyTorch models
else:
__SCREAMING_SNAKE_CASE = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = model_class.__name__
__SCREAMING_SNAKE_CASE = infer_framework(UpperCamelCase_ )
if framework == "tf":
__SCREAMING_SNAKE_CASE = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__SCREAMING_SNAKE_CASE = inspect.signature(model_class.forward ) # PyTorch models
else:
__SCREAMING_SNAKE_CASE = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = "" , UpperCamelCase_ = "." ):
def _flatten_dict(UpperCamelCase_ , UpperCamelCase_="" , UpperCamelCase_="." ):
for k, v in d.items():
__SCREAMING_SNAKE_CASE = str(UpperCamelCase_ ) + delimiter + str(UpperCamelCase_ ) if parent_key else k
if v and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
yield from flatten_dict(UpperCamelCase_ , UpperCamelCase_ , delimiter=UpperCamelCase_ ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=None ):
if is_numpy_array(UpperCamelCase_ ):
return np.transpose(UpperCamelCase_ , axes=UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.T if axes is None else array.permute(*UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.transpose(UpperCamelCase_ , perm=UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.transpose(UpperCamelCase_ , axes=UpperCamelCase_ )
else:
raise ValueError(f"Type not supported for transpose: {type(UpperCamelCase_ )}." )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if is_numpy_array(UpperCamelCase_ ):
return np.reshape(UpperCamelCase_ , UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.reshape(*UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.reshape(UpperCamelCase_ , UpperCamelCase_ )
else:
raise ValueError(f"Type not supported for reshape: {type(UpperCamelCase_ )}." )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=None ):
if is_numpy_array(UpperCamelCase_ ):
return np.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ )
else:
raise ValueError(f"Type not supported for squeeze: {type(UpperCamelCase_ )}." )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if is_numpy_array(UpperCamelCase_ ):
return np.expand_dims(UpperCamelCase_ , UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.unsqueeze(dim=UpperCamelCase_ )
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return jnp.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ )
else:
raise ValueError(f"Type not supported for expand_dims: {type(UpperCamelCase_ )}." )
def _lowerCAmelCase ( UpperCamelCase_ ):
if is_numpy_array(UpperCamelCase_ ):
return np.size(UpperCamelCase_ )
elif is_torch_tensor(UpperCamelCase_ ):
return array.numel()
elif is_tf_tensor(UpperCamelCase_ ):
import tensorflow as tf
return tf.size(UpperCamelCase_ )
elif is_jax_tensor(UpperCamelCase_ ):
return array.size
else:
raise ValueError(f"Type not supported for expand_dims: {type(UpperCamelCase_ )}." )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
for key, value in auto_map.items():
if isinstance(UpperCamelCase_ , (tuple, list) ):
__SCREAMING_SNAKE_CASE = [f"{repo_id}--{v}" if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
__SCREAMING_SNAKE_CASE = f"{repo_id}--{value}"
return auto_map
def _lowerCAmelCase ( UpperCamelCase_ ):
for base_class in inspect.getmro(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = base_class.__module__
__SCREAMING_SNAKE_CASE = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"Could not infer framework from class {model_class}." )
| 155
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__magic_name__ = datasets.load_iris()
__magic_name__ = np.array(data["data"])
__magic_name__ = np.array(data["target"])
__magic_name__ = data["target_names"]
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
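    # Hold-out accuracy on the split created above (an illustrative addition; the
    # original snippet builds X_test/y_test but never uses them):
    correct = sum(
        classifier(X_train, y_train, classes, point) == classes[label]
        for point, label in zip(X_test, y_test)
    )
    print(f"test accuracy: {correct / len(y_test):.2f}")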
| 155
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : int = '''philschmid/bart-large-cnn-samsum'''
__lowerCamelCase : Union[str, Any] = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__lowerCamelCase : List[str] = '''summarizer'''
__lowerCamelCase : Union[str, Any] = AutoTokenizer
__lowerCamelCase : Optional[Any] = AutoModelForSeqaSeqLM
__lowerCamelCase : int = ['''text''']
__lowerCamelCase : List[Any] = ['''text''']
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.pre_processor(SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" ,truncation=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.model.generate(**SCREAMING_SNAKE_CASE_ )[0]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.pre_processor.decode(SCREAMING_SNAKE_CASE_ ,skip_special_tokens=SCREAMING_SNAKE_CASE_ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
| 315
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowercase ( __A : List[str] , __A : str , __A : str , __A : Path , __A : str = None , __A : str = None , __A : str = None , ) -> int:
'''simple docstring'''
if config_name_or_path is None:
snake_case : Tuple = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
snake_case : str = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
snake_case : List[Any] = question_encoder_name_or_path
snake_case : Optional[Any] = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
snake_case : Optional[Any] = RagConfig.from_pretrained(__A )
snake_case : Optional[Any] = AutoConfig.from_pretrained(__A )
snake_case : Tuple = AutoConfig.from_pretrained(__A )
snake_case : Tuple = gen_config
snake_case : Optional[Any] = question_encoder_config
snake_case : Tuple = model_class.from_pretrained_question_encoder_generator(
__A , __A , config=__A )
rag_model.save_pretrained(__A )
# Sanity check.
model_class.from_pretrained(__A )
# Save tokenizers.
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(__A )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
snake_case : List[Any] = AutoTokenizer.from_pretrained(__A )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
__lowercase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowercase : Optional[Any] = parser.parse_args()
__lowercase : Dict = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
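    # Example invocation (illustrative checkpoints; assuming this script is saved
    # as consolidate_rag_checkpoint.py):
    #   python consolidate_rag_checkpoint.py \
    #       --model_type rag_sequence \
    #       --generator_name_or_path facebook/bart-large-cnn \
    #       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
    #       --dest ./rag-checkpoint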
| 315
| 1
|
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
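    # Reading the table: dp[i][j] is True iff the first i characters of `a` can
    # be reduced to the first j characters of `b` by upper-casing some lowercase
    # letters and deleting the remaining lowercase ones.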
| 175
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """canine"""
def __init__( self , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=16384 , __UpperCAmelCase=16 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0 , __UpperCAmelCase=0Xe_000 , __UpperCAmelCase=0Xe_001 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=16384 , __UpperCAmelCase=128 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
# Character config:
__lowerCamelCase = downsampling_rate
__lowerCamelCase = upsampling_kernel_size
__lowerCamelCase = num_hash_functions
__lowerCamelCase = num_hash_buckets
__lowerCamelCase = local_transformer_stride
| 175
| 1
|
'''simple docstring'''
def aliquot_sum(input_num: int) -> int:
    """Return the aliquot sum of `input_num`: the sum of all its proper divisors."""
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""" )
    if input_num <= 0:
        raise ValueError("""Input must be positive""" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
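    # Quick demo: aliquot_sum(12) == 1 + 2 + 3 + 4 + 6 == 16, and a number is
    # perfect exactly when it equals its own aliquot sum:
    print(aliquot_sum(12))      # 16
    print(aliquot_sum(6) == 6)  # True: 6 is perfect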
| 686
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
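# Worked example of the formula in the description above: for the reference
# "this is the reference" and the hypothesis "this is an reference test", the
# alignment gives S=1 ("the" -> "an"), D=0, I=1 ("test"), C=3, so
# WER = (S + D + I) / (S + D + C) = (1 + 0 + 1) / (1 + 0 + 3) = 0.5.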
| 686
| 1
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Relax every edge `vertex_count - 1` times, then check for a negative cycle."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
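    # Non-interactive sketch with illustrative values:
    #   g = [{"src": 0, "dst": 1, "weight": 4}, {"src": 0, "dst": 2, "weight": 1},
    #        {"src": 2, "dst": 1, "weight": 2}, {"src": 1, "dst": 3, "weight": 1}]
    #   bellman_ford(g, vertex_count=4, edge_count=4, src=0) == [0.0, 3.0, 1.0, 4.0]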
| 330
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = '''▁'''
lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCamelCase_ = {
'''facebook/mbart-large-50-one-to-many-mmt''': 10_24,
}
# fmt: off
lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = ['''input_ids''', '''attention_mask''']
snake_case = []
snake_case = []
def __init__( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Dict="</s>" , __UpperCAmelCase : Dict="</s>" , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : Optional[int]="<unk>" , __UpperCAmelCase : Optional[int]="<pad>" , __UpperCAmelCase : Union[str, Any]="<mask>" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Optional[int] , ):
'''simple docstring'''
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_A = {} if sp_model_kwargs is None else sp_model_kwargs
_A = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
_A = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_A = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_A = 1
_A = len(self.sp_model )
_A = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCAmelCase )
}
_A = {v: k for k, v in self.lang_code_to_id.items()}
_A = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_A = src_lang if src_lang is not None else "en_XX"
_A = self.lang_code_to_id[self._src_lang]
_A = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Any ):
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Optional[Any] , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : str , __UpperCAmelCase : str ):
'''simple docstring'''
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_A = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
_A = True
_A = []
else:
current_sub_tokens.append(__UpperCAmelCase )
_A = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def lowerCAmelCase ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
_A = [1] * len(self.prefix_tokens )
_A = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] , __UpperCAmelCase : Optional[str] , **__UpperCAmelCase : List[Any] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_A = src_lang
_A = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
_A = self.convert_tokens_to_ids(__UpperCAmelCase )
_A = tgt_lang_id
return inputs
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : str = "en_XX" , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : str = "ro_RO" , **__UpperCAmelCase : List[str] , ):
'''simple docstring'''
_A = src_lang
_A = tgt_lang
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.lang_code_to_id[src_lang]
_A = [self.cur_lang_code_id]
_A = [self.eos_token_id]
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.lang_code_to_id[tgt_lang]
_A = [self.cur_lang_code_id]
_A = [self.eos_token_id]
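# Usage sketch (assuming this class is MBart50Tokenizer from transformers; the
# checkpoint comes from the pretrained map above):
#
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#   ids = tok("Hello world", return_tensors="pt").input_ids
#   # ids start with the en_XX language code and end with </s>, matching the
#   # prefix/suffix tokens set in set_src_lang_special_tokens above.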
| 330
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : List[str] = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 166
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
snake_case_ : Any = logging.get_logger(__name__)
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Optional[int] = ['''input_values''', '''padding_mask''']
def __init__( self , __snake_case = 1 , __snake_case = 2_4_0_0_0 , __snake_case = 0.0 , __snake_case = None , __snake_case = None , **__snake_case , ) -> Dict:
"""simple docstring"""
super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case )
UpperCAmelCase: Any = chunk_length_s
UpperCAmelCase: Optional[int] = overlap
@property
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , __snake_case , __snake_case = None , __snake_case = False , __snake_case = None , __snake_case = None , __snake_case = None , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
UpperCAmelCase: Optional[Any] = True
UpperCAmelCase: List[str] = bool(
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
            UpperCAmelCase: int = [np.asarray(__snake_case , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(__snake_case , np.ndarray ):
            UpperCAmelCase: Dict = np.asarray(__snake_case , dtype=np.float32 )
        elif isinstance(__snake_case , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            UpperCAmelCase: Optional[Any] = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
UpperCAmelCase: Any = [np.asarray(__snake_case ).T]
# verify inputs are valid
for idx, example in enumerate(__snake_case ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
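
# Usage sketch (added for illustration; not part of the original module). The
# constructor arguments are plausible example values, and `numpy` is imported
# locally to keep the demo self-contained.
def _demo_encodec_feature_extractor():
    import numpy as np

    extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
    waveform = np.random.randn(24_000).astype(np.float32)  # one second of mono audio
    features = extractor(waveform, sampling_rate=24_000)
    # "input_values" holds the (chunk-)padded audio; "padding_mask" marks real samples
    print({key: np.asarray(value).shape for key, value in features.items()})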
"""simple docstring"""
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) scan over the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Within the strip, only up to 6 neighbours need to be checked per point."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
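
# Sanity check (added, not part of the original algorithm file): on small random
# inputs the divide-and-conquer answer should match a direct O(n^2) scan.
def _brute_force_closest_distance(points):
    return min(
        euclidean_distance_sqr(points[i], points[j])
        for i in range(len(points) - 1)
        for j in range(i + 1, len(points))
    ) ** 0.5


if __name__ == "__main__":
    import random

    sample = [(random.uniform(-100, 100), random.uniform(-100, 100)) for _ in range(64)]
    assert abs(closest_pair_of_points(sample, len(sample)) - _brute_force_closest_distance(sample)) < 1e-9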
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
snake_case : int = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
snake_case : List[str] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
snake_case : Tuple = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
snake_case : str = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
snake_case : Any = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
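
# Worked example (added): with n=2 samples of which c=1 passes, the unbiased
# estimator gives pass@1 = 1 - C(1,1)/C(2,1) = 0.5 and pass@2 = 1.0, matching
# the doctest in _KWARGS_DESCRIPTION above.
if __name__ == "__main__":
    print(estimate_pass_at_k(np.array([2]), np.array([1]), 1))  # [0.5]
    print(estimate_pass_at_k(np.array([2]), np.array([1]), 2))  # [1.]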
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
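
# Example output (added): snake-casing and shard naming for a two-shard split.
if __name__ == "__main__":
    print(camelcase_to_snakecase("SquadV2"))  # squad_v2
    print(filenames_for_dataset_split("/data", "SquadV2", "train", filetype_suffix="arrow", shard_lengths=[100, 100]))
    # ['/data/squad_v2-train-00000-of-00002.arrow', '/data/squad_v2-train-00001-of-00002.arrow']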
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
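
# Illustrative sketch (added): a stripped-down, hypothetical stand-in showing
# what the `_LazyModule` assignment above achieves — the real implementation
# lives in `transformers.utils`. The mechanism is the same: importing the
# package is cheap, and the heavy submodule import only happens when an exported
# name such as `OwlViTModel` is first accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, package, import_structure):
        super().__init__(name)
        self._package = package
        # map each exported name to the submodule that defines it
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._class_to_module[attr], self._package)
        return getattr(submodule, attr)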
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1_024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
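
# Usage sketch (added for illustration; not part of the original module).
# Runs the extractor above on ten seconds of random audio at its native 48 kHz.
def _demo_clap_feature_extractor():
    extractor = ClapFeatureExtractor()
    audio = np.random.randn(10 * 48_000)  # ten seconds at 48 kHz, i.e. exactly nb_max_samples
    batch = extractor(audio, sampling_rate=48_000, return_tensors="np")
    # with the default "fusion" truncation, four mel views are stacked per clip
    print(batch["input_features"].shape)  # (batch, 4, frames, 64)
    # when no clip exceeds max length, fusion randomly promotes one, so this is [[True]]
    print(batch["is_longer"])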
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
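
# Numeric check (added): the streaming form log(A) - B/A above equals the
# Shannon entropy of softmax(x), since H = -sum_i p_i log p_i with p_i = exp(x_i)/A.
def _check_entropy_matches_softmax():
    x = torch.randn(4, 7)
    p = torch.softmax(x, dim=1)
    reference = -(p * torch.log(p)).sum(dim=1)
    assert torch.allclose(entropy(x), reference, atol=1e-5)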
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])

        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to
    (cross-entropy computation in BertForSequenceClassification)"""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
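
# Usage sketch (added): entropy-thresholded early exit at inference time, using
# only the classes defined above. `model` is assumed to be an already-loaded
# DeeBertForSequenceClassification; the 0.4 threshold is an arbitrary example.
def _demo_deebert_early_exit(model, input_ids):
    model.eval()
    model.bert.encoder.set_early_exit_entropy(0.4)
    with torch.no_grad():
        outputs = model(input_ids=input_ids)
    logits = outputs[0]
    exit_layer = outputs[-1]  # layer index (1-based) where inference stopped
    return logits, exit_layer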
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
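
# Example (added): like any PretrainedConfig subclass, keyword arguments
# override the defaults above and round-trip through to_dict().
if __name__ == "__main__":
    config = BertGenerationConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
    assert config.to_dict()["hidden_size"] == 512
    assert config.model_type == "bert-generation"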
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})

        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''')
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults)}.''')

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')

        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json')

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching, '''os.path.join''', mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os ,_PatchedModuleObj )
assert isinstance(_test_patching.os.path ,_PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path ,_PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, '''open''', mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching, '''pandas.read_csv''', mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None) is None
    with patch_submodule(_test_patching, '''len''', mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, '''os.path.join''', mock_join):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock):
        pass
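
# Conceptual sketch (added): `_TinyPatch` is a hypothetical, simplified cousin
# of `patch_submodule` showing the core idea the tests above exercise — resolve
# a dotted target on a module, swap in the mock, restore the original on exit.
# The real helper also wraps parent modules in _PatchedModuleObj and handles
# builtins and renamed imports; this toy version does not.
class _TinyPatch:
    def __init__(self, module, target, new):
        self._module, self._target, self._new = module, target, new

    def __enter__(self):
        *parents, attr = self._target.split(".")
        obj = self._module
        for name in parents:
            obj = getattr(obj, name)
        self._obj, self._attr = obj, attr
        self._original = getattr(obj, attr)
        setattr(obj, attr, self._new)
        return self._new

    def __exit__(self, *exc_info):
        setattr(self._obj, self._attr, self._original)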
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 622
| 1
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 493
|
import numpy as np
# 5x5 Polybius square used by the Bifid cipher ("i" and "j" share a cell)
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # 1-based (row, column) coordinates of the letter in the square
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
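

if __name__ == "__main__":
    # A minimal usage sketch (assumes the BifidCipher class above): encoding and
    # then decoding should round-trip any a-z message, with "j" folded into "i".
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    print(encoded)  # the ciphertext
    print(cipher.decode(encoded))  # -> "testmessage"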
| 493
| 1
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_0_4_8,
"AI-Sweden/gpt-sw3-350m": 2_0_4_8,
"AI-Sweden/gpt-sw3-1.6b": 2_0_4_8,
"AI-Sweden/gpt-sw3-6.7b": 2_0_4_8,
"AI-Sweden/gpt-sw3-20b": 2_0_4_8,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )

        return self.encode(text=prompt)
| 702
|
def solution() -> str:
    """Returns the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
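

# A minimal alternative sketch (not part of the original solution): the same
# last ten digits can be computed with three-argument pow() so that the
# intermediate numbers stay small.
def solution_modular() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)  # zfill guards against a leading zero in the tail
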
if __name__ == "__main__":
print(solution())
| 55
| 0
|
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
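

# A small usage sketch (the environment variable names here are hypothetical,
# chosen only for illustration):
if __name__ == "__main__":
    os.environ["MY_WORLD_SIZE"] = "4"
    print(get_int_from_env(["MY_WORLD_SIZE", "WORLD_SIZE"], 1))  # -> 4
    print(parse_flag_from_env("MY_DEBUG", default=False))  # -> False unless MY_DEBUG=1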
| 47
|
"""simple docstring"""
_lowerCAmelCase = {"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []}
_lowerCAmelCase = ["""a""", """b""", """c""", """d""", """e"""]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = start
# add current to visited
visited.append(_lowerCamelCase )
_lowerCAmelCase : int = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_lowerCAmelCase : Any = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# if all neighbors visited add current to sort
sort.append(_lowerCamelCase )
# if all vertices haven't been visited select a new one to visit
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
for vertice in vertices:
if vertice not in visited:
_lowerCAmelCase : List[Any] = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# return sort
return sort
if __name__ == "__main__":
_lowerCAmelCase = topological_sort("""a""", [], [])
print(sort)
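    # For the graph above this prints ['c', 'd', 'e', 'b', 'a']: each vertex
    # appears only after all of its descendants (a reverse topological order).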
| 259
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure["modeling_maskformer_swin"] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 718
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
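
# Example CLI invocation via python-fire (file names here are hypothetical):
#   python rouge_cli.py predictions.txt targets.txt --save_path=metrics.json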
| 515
| 0
|
from __future__ import annotations

from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
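
    # A small usage sketch (assumes the FenwickTree class above): build the tree
    # from an array, then range-query and point-update it.
    f = FenwickTree(arr=[1, 2, 3, 4, 5])
    print(f.query(1, 4))  # sum of arr[1:4] -> 9
    f.add(2, 10)  # arr[2] += 10
    print(f.query(1, 4))  # -> 19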
| 217
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
            bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 217
| 1
|
def solution(n: int = 1000) -> int:
    """Sum of r_max = 2 * a * ((a - 1) // 2) for 3 <= a <= n, the maximum
    remainder of (a - 1)^k + (a + 1)^k divided by a^2."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 512
|
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowerCamelCase : str = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_lowerCamelCase : int = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_lowerCamelCase : Tuple = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_lowerCamelCase : Optional[int] = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
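

# Sanity-check sketch of the formula in the docstring above: for reference "abc"
# and prediction "axc", jiwer counts S=1, D=0, I=0, hits=2 at the character
# level, so CER = (S + D + I) / (S + D + hits) = 1 / 3.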
| 512
| 1
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 433
|
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(map(int, str(factorial(num))))
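

# e.g. solution(10) == 27, since 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27
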
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 433
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
snake_case_ = TFDistilBertModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
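# Usage sketch (illustrative, not part of this module): with the `_LazyModule`
# replacement above, `import transformers.models.biogpt` stays cheap, and the heavy
# `modeling_biogpt` submodule is only imported when an attribute such as
# `BioGptModel` is first accessed.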
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'


class ConvertCommand(BaseTransformersCLICommand):
    '''simple docstring'''

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowerCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl,"
                " xlnet, xlm, lxmert, rembert]"
            )
'''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    """
    Return True iff the two integers have opposite signs; the XOR of two ints is
    negative exactly when their sign bits differ.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
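# Ad-hoc illustration of the three matching rules above (not part of the original
# script): an exact entry matches by substring, and ".*." entries match by
# prefix/suffix.
#
#   should_ignore("text_decoder_prenet.embed_positions._float_tensor", IGNORE_KEYS)  # True (exact key)
#   should_ignore("encoder.layers.3.norm_k.weight", IGNORE_KEYS)                     # True (prefix/suffix)
#   should_ignore("encoder.layers.3.fc1.weight", IGNORE_KEYS)                        # False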
def recursively_load_weights(fairseq_dict, hf_model, task):
    '''simple docstring'''
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        feature_extractor = SpeechT5FeatureExtractor()
        processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
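# Example invocation (script name assumed; argument names as defined above, paths
# are placeholders):
#
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted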
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """simple docstring"""
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        """simple docstring"""
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        """simple docstring"""
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = False
@classmethod
def lowercase__ ( cls : Optional[int] )->Optional[Any]:
super().setUpClass()
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowercase__ ( cls : Dict )->Any:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch(self):
        """simple docstring"""
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        """simple docstring"""
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        """simple docstring"""
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        """simple docstring"""
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        """simple docstring"""
        testargs = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        """simple docstring"""
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        """simple docstring"""
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        """simple docstring"""
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
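    # Quick illustration of the caveat above (not part of the original tests):
    # a lone UTF-8 continuation byte cannot be decoded on its own, e.g.
    # `bytes([0x80]).decode("utf-8")` raises UnicodeDecodeError, which is why
    # `get_clean_sequence` filters ids through `tokenizer.decode` first.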
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized in ByT5
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    '''simple docstring'''

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
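# Usage sketch (illustrative): `ModuleTransfer(src=original, dest=ours)(x)` runs one
# forward pass through both networks, lets `Tracker` record the leaf modules that
# hold parameters, and then copies state dicts pairwise in traversal order.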
class FakeRegNetVisslWrapper(nn.Module):
    '''simple docstring'''

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    '''simple docstring'''

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    '''simple docstring'''

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification

        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """simple docstring"""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
__SCREAMING_SNAKE_CASE : Optional[int] = NameToOurModelFuncMap()
__SCREAMING_SNAKE_CASE : List[Any] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , model_dir=str(_SCREAMING_SNAKE_CASE ) , map_location="cpu" )
__SCREAMING_SNAKE_CASE : Dict = model_func()
# check if we have a head, if yes add it
__SCREAMING_SNAKE_CASE : Optional[Any] = files["classy_state_dict"]["base_model"]["model"]
__SCREAMING_SNAKE_CASE : int = model_state_dict["trunk"]
model.load_state_dict(_SCREAMING_SNAKE_CASE )
return model.eval(), model_state_dict["heads"]
# pretrained
__SCREAMING_SNAKE_CASE : Any = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__SCREAMING_SNAKE_CASE : int = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__SCREAMING_SNAKE_CASE : str = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__SCREAMING_SNAKE_CASE : Tuple = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
    RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
__SCREAMING_SNAKE_CASE : int = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__SCREAMING_SNAKE_CASE : int = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__SCREAMING_SNAKE_CASE : Optional[int] = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__SCREAMING_SNAKE_CASE : Tuple = partial(
_SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
    RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
_SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
return config, expected_shape
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
        '''The name of the model you wish to convert; it must be one of the supported regnet* architectures,'''
        ''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
    default=True,
    action=argparse.BooleanOptionalAction,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowercase = parser.parse_args()
lowercase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
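# Illustrative usage sketch (ours, not part of the original script): the converter can also
# be driven programmatically instead of through argparse. The call below mirrors the
# `__main__` block above; actually running it downloads the VISSL SEER checkpoints.
#
#   from pathlib import Path
#
#   dump_dir = Path("./converted-regnet")
#   dump_dir.mkdir(exist_ok=True, parents=True)
#   convert_weights_and_push(dump_dir, "regnet-y-320-seer", False)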
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def is_in_circle(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> bool:
lowerCAmelCase : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
lowerCAmelCase : List[str] = mean(
int(is_in_circle(uniform(-1.0 ,1.0 ) ,uniform(-1.0 ,1.0 ) ) )
for _ in range(SCREAMING_SNAKE_CASE__ ) )
    # The ratio of the circle's area to the square's area is pi/4.
lowerCAmelCase : List[Any] = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = 1.0 ,):
'''simple docstring'''
return mean(
function_to_integrate(uniform(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) ) for _ in range(SCREAMING_SNAKE_CASE__ ) ) * (max_value - min_value)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = 1.0 ):
'''simple docstring'''
def identity_function(SCREAMING_SNAKE_CASE__ ) -> float:
return x
lowerCAmelCase : Union[str, Any] = area_under_curve_estimator(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print("""******************""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def function_to_integrate(SCREAMING_SNAKE_CASE__ ) -> float:
return sqrt(4.0 - x * x )
lowerCAmelCase : int = area_under_curve_estimator(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,0.0 ,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
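# A self-contained sketch (the helper names below are ours, since the estimator above had
# its name mangled): the standard error of a Monte Carlo estimate shrinks roughly like
# 1/sqrt(n), so 100x more samples buys about 10x less error.
from math import pi as _pi
from random import uniform as _uniform


def _estimate_pi(num_samples: int) -> float:
    # Fraction of uniform points in [-1, 1]^2 landing inside the unit circle, times 4.
    inside = sum(
        1 for _ in range(num_samples) if _uniform(-1.0, 1.0) ** 2 + _uniform(-1.0, 1.0) ** 2 <= 1.0
    )
    return 4.0 * inside / num_samples


if __name__ == "__main__":
    for n in (1_000, 100_000):
        print(f"n={n}: |pi - estimate| = {abs(_pi - _estimate_pi(n)):.4f}")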
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
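# A generic sketch (ours, not the Transformers implementation) of the lazy-import pattern
# used above: attribute access triggers the real import, so importing the package stays
# cheap until torch/tf/flax symbols are actually needed.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so subsequent accesses skip __getattr__
        return value


# Example: `lazy.dumps` imports the json module only on first access.
# lazy = TinyLazyModule("demo", {"json": ["dumps", "loads"]})
# print(lazy.dumps({"a": 1}))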
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , ):
UpperCAmelCase_ = cipher_alphabet or [chr(lowerCAmelCase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
        # Relative frequencies of letters in the English language
UpperCAmelCase_ = {
"a": 0.08497,
"b": 0.01492,
"c": 0.02202,
"d": 0.04253,
"e": 0.11162,
"f": 0.02228,
"g": 0.02015,
"h": 0.06094,
"i": 0.07546,
"j": 0.00153,
"k": 0.01292,
"l": 0.04025,
"m": 0.02406,
"n": 0.06749,
"o": 0.07507,
"p": 0.01929,
"q": 0.00095,
"r": 0.07587,
"s": 0.06327,
"t": 0.09356,
"u": 0.02758,
"v": 0.00978,
"w": 0.02560,
"x": 0.00150,
"y": 0.01994,
"z": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCAmelCase_ = frequencies_dict
if not case_sensitive:
UpperCAmelCase_ = ciphertext.lower()
# Chi squared statistic values
UpperCAmelCase_ = {}
# cycle through all of the shifts
for shift in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase_ = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCAmelCase_ = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowerCAmelCase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCAmelCase_ = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCAmelCase_ = letter.lower()
if letter in frequencies:
                    # Get the number of times the letter occurs in the message
UpperCAmelCase_ = decrypted_with_shift.lower().count(lowerCAmelCase__ )
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
UpperCAmelCase_ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCAmelCase_ = ((occurrences - expected) ** 2) / expected
                    # Add this letter's contribution to the total chi-squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
UpperCAmelCase_ = decrypted_with_shift.count(lowerCAmelCase__ )
                    # Get the expected number of times the letter should appear based
# on letter frequencies
UpperCAmelCase_ = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCAmelCase_ = ((occurrences - expected) ** 2) / expected
                    # Add this letter's contribution to the total chi-squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCAmelCase_ = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowerCAmelCase__ ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCAmelCase_ = min(
lowerCAmelCase__ , key=lowerCAmelCase__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
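# Usage sketch, comment-only (the duplicate mangled parameter names above make this file
# non-importable as written): the decoder brute-forces every shift and ranks candidates by
# chi-squared distance to English letter frequencies. "khoor zruog" is "hello world"
# shifted by 3; note that on very short texts the statistic can occasionally mis-rank shifts.
#
#   shift, chi_squared, plaintext = a__("khoor zruog")  # `a__` is the mangled decoder above
#   print(shift, plaintext)  # expected: 3 hello world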
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase = 50_000
lowerCamelCase = 5_000
lowerCamelCase , lowerCamelCase = os.path.split(__file__)
lowerCamelCase = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
def a__ ( ):
UpperCAmelCase_ = {"num examples": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
UpperCAmelCase_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
UpperCAmelCase_ = generate_example_dataset(
os.path.join(lowerCAmelCase__ , "dataset.arrow" ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print("shuffling dataset" )
UpperCAmelCase_ = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
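# `get_duration` comes from a local utils module that is not shown here; a plausible
# minimal implementation (an assumption, not the benchmark's actual code) is a decorator
# that returns the wall-clock time of the wrapped call instead of its result:
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper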
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE__ : Tuple = tf_top_k_top_p_filtering(a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE__ : Tuple = output[output != -float("inf" )]
SCREAMING_SNAKE_CASE__ : Dict = tf.cast(
tf.where(tf.not_equal(a , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(a , a , rtol=1E-12 )
tf.debugging.assert_equal(a , a )
@require_tf
class _a ( unittest.TestCase , lowercase__ ):
"""simple docstring"""
if is_tf_available():
snake_case_ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def A_ ( self : Optional[Any] ) ->List[Any]:
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : List[Any] = 2
class _a ( tf.Module ):
"""simple docstring"""
def __init__( self : List[str] , a : Any ) ->List[str]:
super(a , self ).__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=a , )
def A_ ( self : Tuple , a : Optional[Any] , a : str ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = self.model.generate(
input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[2, 0], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : List[str] = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = DummyModel(model=a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a , a , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.saved_model.load(a ).signatures["serving_default"]
for batch_size in range(1 , len(a ) + 1 ):
SCREAMING_SNAKE_CASE__ : int = {
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = serving_func(**a )["sequences"]
SCREAMING_SNAKE_CASE__ : List[str] = test_model.generate(**a , max_new_tokens=a )
tf.debugging.assert_equal(a , a )
@slow
def A_ ( self : List[str] ) ->Any:
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : Dict = 1
SCREAMING_SNAKE_CASE__ : Any = 2
class _a ( tf.Module ):
"""simple docstring"""
def __init__( self : List[str] , a : Union[str, Any] ) ->str:
super(a , self ).__init__()
SCREAMING_SNAKE_CASE__ : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=a , )
def A_ ( self : str , a : Tuple , a : List[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model.generate(
input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Tuple = [[2], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : str = [[1], [1, 1]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = DummyModel(model=a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a , a , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.saved_model.load(a ).signatures["serving_default"]
for input_row in range(len(a ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = serving_func(**a )["sequences"]
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**a , max_new_tokens=a )
tf.debugging.assert_equal(a , a )
@slow
@require_tensorflow_text
def A_ ( self : Tuple ) ->List[str]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=a )
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(a , "spiece.model" ) , "rb" ).read() )
SCREAMING_SNAKE_CASE__ : str = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def A_ ( self : Any , a : Union[str, Any] , *a : Optional[int] , **a : Any ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer.tokenize(a )
SCREAMING_SNAKE_CASE__ : int = text.pad_model_inputs(
a , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE__ : List[Any] = self.model.generate(input_ids=a , attention_mask=a )
return self.tokenizer.detokenize(a )
SCREAMING_SNAKE_CASE__ : Tuple = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
SCREAMING_SNAKE_CASE__ : List[Any] = complete_model(a )
SCREAMING_SNAKE_CASE__ : List[str] = tf.keras.Model(a , a )
keras_model.save(a )
def A_ ( self : Dict ) ->Optional[int]:
# Has PT equivalent: this test relies on random sampling
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
SCREAMING_SNAKE_CASE__ : Dict = 14
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : int = "Hello, my dog is cute and"
SCREAMING_SNAKE_CASE__ : int = tokenizer(a , return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = model.generate(**a , eos_token_id=a , **a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE__ : str = [6_38, 1_98]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(**a , eos_token_id=a , **a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def A_ ( self : Optional[int] ) ->Dict:
# Has PT equivalent: ample use of framework-specific code
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE__ : str = "Hugging Face is a technology company based in New York and Paris."
SCREAMING_SNAKE_CASE__ : str = bart_tokenizer(a , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE__ : Optional[int] = bart_model.generate(a ).numpy()
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Optional[Any] , a : List[str] , a : Dict=None , **a : str ) ->Optional[int]:
return super().call(a , **a )
SCREAMING_SNAKE_CASE__ : Dict = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bart_model.generate(a , foo="bar" ).numpy()
self.assertTrue(np.array_equal(a , a ) )
class _a ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def A_ ( self : Any , a : Tuple , **a : Union[str, Any] ) ->Any:
return super().call(a , **a )
SCREAMING_SNAKE_CASE__ : Optional[int] = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE__ : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE__ : Tuple = bart_model.generate(a ).numpy()
with self.assertRaises(a ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a , foo="bar" )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
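# Usage sketch, comment-only (the class and method names above were mangled, so this shows
# the intended BaseImageProcessor API rather than code that runs against this file as-is):
#
#   processor = ImageProcessor()  # defaults: shortest edge -> 256, center-crop 224x224,
#                                 # rescale by 1/255, normalize with ImageNet mean/std
#   batch = processor(images=[pil_image], return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)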
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = val
__A : Tuple = None
__A : Any = None
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
__A : Union[str, Any] = Node(_UpperCAmelCase)
else:
self.left.insert(_UpperCAmelCase)
elif val > self.val:
if self.right is None:
__A : Dict = Node(_UpperCAmelCase)
else:
self.right.insert(_UpperCAmelCase)
else:
__A : str = val
def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : int ) -> Union[str, Any]:
# Recursive traversal
if root:
inorder(root.left , __snake_case )
res.append(root.val )
inorder(root.right , __snake_case )
def _lowerCAmelCase ( __snake_case : List[str] ) -> Union[str, Any]:
# Build BST
if len(__snake_case ) == 0:
return arr
__A : List[str] = Node(arr[0] )
for i in range(1 , len(__snake_case ) ):
root.insert(arr[i] )
# Traverse BST in order.
__A : Tuple = []
inorder(__snake_case , __snake_case )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
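# A quick self-check (ours; assumes the sorter above under its pre-mangling name
# `tree_sort`): the in-order traversal of a BST yields its keys in sorted order, so the
# result must agree with sorted(). Average cost is O(n log n); already-sorted input
# degrades to O(n^2) because the unbalanced tree becomes a chain.
#
#   data = [10, 1, 3, 2, 9, 14, 13]
#   assert tree_sort(data) == sorted(data)  # [1, 2, 3, 9, 10, 13, 14]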
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _lowerCAmelCase ( __snake_case : str , __snake_case : str , **__snake_case : List[Any] ) -> Any:
__A : Optional[Any] = AutoConfig.from_pretrained(__snake_case , **__snake_case )
__A : int = AutoModelForSeqaSeqLM.from_config(__snake_case )
model.save_pretrained(__snake_case )
AutoTokenizer.from_pretrained(__snake_case ).save_pretrained(__snake_case )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
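# CLI sketch (the script filename and model/output names below are placeholders): `fire`
# maps positional arguments onto the function's parameters, so this saves a randomly
# initialized copy of the given seq2seq config plus its tokenizer, handy for smoke tests.
#
#   python save_randomly_initialized.py sshleifer/bart-tiny-random ./tiny-bart-random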
import gc
import threading
import time
import psutil
import torch
class __lowercase :
def __init__( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = psutil.Process()
UpperCAmelCase = False
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase = -1
while True:
UpperCAmelCase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = True
UpperCAmelCase = threading.Thread(target=self.peak_monitor )
UpperCAmelCase = True
self.thread.start()
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = False
self.thread.join()
return self.cpu_memory_peak
__a = PeakCPUMemory()
def _UpperCamelCase ( ) ->Any:
# Time
UpperCAmelCase = {"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase = torch.cuda.memory_allocated(SCREAMING_SNAKE_CASE_ )
torch.cuda.reset_peak_memory_stats()
return measures
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[int]:
# Time
UpperCAmelCase = {"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**2_0
UpperCAmelCase = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**2_0
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase = (torch.cuda.memory_allocated(SCREAMING_SNAKE_CASE_ ) - start_measures[str(SCREAMING_SNAKE_CASE_ )]) / 2**2_0
UpperCAmelCase = (torch.cuda.max_memory_allocated(SCREAMING_SNAKE_CASE_ ) - start_measures[str(SCREAMING_SNAKE_CASE_ )]) / 2**2_0
return measures
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Tuple:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(SCREAMING_SNAKE_CASE_ )]:.2f}MiB""" )
UpperCAmelCase = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
UpperCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = (3, 3_2, 1_2_8)
UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
UpperCAmelCase = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + """\n""" )
UpperCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 3_2, """width""": 1_2_8},
}
UpperCAmelCase = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def _lowercase ( self : Optional[Any] , **__lowerCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowercase ( self : int , **__lowerCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowercase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
UpperCAmelCase = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
UpperCAmelCase = Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) )
return image_input
def _lowercase ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
UpperCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _lowercase ( self : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(__lowerCamelCase , return_tensors="""np""" )
UpperCAmelCase = processor(images=__lowerCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self : str ) -> int:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCAmelCase = """test"""
UpperCAmelCase = processor(text=__lowerCamelCase )
UpperCAmelCase = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCAmelCase = """test"""
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _lowercase ( self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase = processor.char_decode(__lowerCamelCase )
UpperCAmelCase = tokenizer.batch_decode(__lowerCamelCase )
UpperCAmelCase = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCAmelCase = None
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = MgpstrProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCAmelCase = torch.randn(1 , 2_7 , 3_8 )
UpperCAmelCase = torch.randn(1 , 2_7 , 5_0_2_5_7 )
UpperCAmelCase = torch.randn(1 , 2_7 , 3_0_5_2_2 )
UpperCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
import functools
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
# Validation
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for day in days ):
raise ValueError("""The parameter days should be a list of integers""" )
if len(UpperCAmelCase__ ) != 3 or not all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for cost in costs ):
raise ValueError("""The parameter costs should be a list of three integers""" )
if len(UpperCAmelCase__ ) == 0:
return 0
if min(UpperCAmelCase__ ) <= 0:
raise ValueError("""All days elements should be greater than 0""" )
if max(UpperCAmelCase__ ) >= 3_6_6:
raise ValueError("""All days elements should be less than 366""" )
lowercase_ = set(UpperCAmelCase__ )
@functools.cache
def dynamic_programming(UpperCAmelCase__ ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
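# Worked example, comment-only (the duplicate mangled parameter names above make the file
# non-importable as written; conceptually the signature is mincost_tickets(days, costs)):
# days=[1, 4, 6, 7, 8, 20] with costs=[2, 7, 15] for 1-/7-/30-day passes gives 11 -- a
# 7-day pass bought on day 1 covers days 1-7 (cost 7), then single-day passes on days 8
# and 20 add 2 + 2.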
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = AutoencoderKL
__SCREAMING_SNAKE_CASE : Optional[Any] = 'sample'
__SCREAMING_SNAKE_CASE : Any = 1e-2
@property
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = 4
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return (3, 32, 32)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
lowercase_ = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.prepare_init_args_and_inputs_for_common()
lowercase_ = self.model_class(**UpperCamelCase__ )
model.to(UpperCamelCase__ )
assert not model.is_gradient_checkpointing and model.training
lowercase_ = model(**UpperCamelCase__ ).sample
        # run the backward pass; for simplicity we skip a real loss and instead
        # backprop on the scalar (out - labels).mean()
model.zero_grad()
lowercase_ = torch.randn_like(UpperCamelCase__ )
lowercase_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowercase_ = self.model_class(**UpperCamelCase__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(UpperCamelCase__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase_ = model_a(**UpperCamelCase__ ).sample
        # run the backward pass; for simplicity we skip a real loss and instead
        # backprop on the scalar (out_a - labels).mean()
model_a.zero_grad()
lowercase_ = (out_a - labels).mean()
loss_a.backward()
        # compare the output and parameter gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowercase_ = dict(model.named_parameters() )
lowercase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCamelCase__ )
lowercase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
lowercase_ = model.to(UpperCamelCase__ )
model.eval()
if torch_device == "mps":
lowercase_ = torch.manual_seed(0 )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
lowercase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase_ = image.to(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ , sample_posterior=UpperCamelCase__ , generator=UpperCamelCase__ ).sample
lowercase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase_ = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
lowercase_ = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowercase_ = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return F'''gaussian_noise_s={seed}_shape={"_".join([str(UpperCamelCase__ ) for s in shape] )}.npy'''
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str=0 , UpperCamelCase__ : str=(4, 3, 512, 512) , UpperCamelCase__ : str=False ):
'''simple docstring'''
lowercase_ = torch.floataa if fpaa else torch.floataa
lowercase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) ).to(UpperCamelCase__ ).to(UpperCamelCase__ )
return image
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple="CompVis/stable-diffusion-v1-4" , UpperCamelCase__ : Any=False ):
'''simple docstring'''
lowercase_ = """fp16""" if fpaa else None
lowercase_ = torch.floataa if fpaa else torch.floataa
lowercase_ = AutoencoderKL.from_pretrained(
UpperCamelCase__ , subfolder="""vae""" , torch_dtype=UpperCamelCase__ , revision=UpperCamelCase__ , )
model.to(UpperCamelCase__ ).eval()
return model
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(UpperCamelCase__ )
return torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCamelCase__ )
lowercase_ = self.get_generator(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ , generator=UpperCamelCase__ , sample_posterior=UpperCamelCase__ ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCamelCase__ )
lowercase_ = self.get_sd_image(UpperCamelCase__ , fpaa=UpperCamelCase__ )
lowercase_ = self.get_generator(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ , generator=UpperCamelCase__ , sample_posterior=UpperCamelCase__ ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(UpperCamelCase__ )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 412
| 1
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self :List[Any] ):
A = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
A = AutoTokenizer.from_pretrained("google/mt5-small" )
A = tokenizer("Hello there" , return_tensors="np" ).input_ids
A = tokenizer("Hi I am" , return_tensors="np" ).input_ids
A = shift_tokens_right(__UpperCamelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
A = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits
A = optax.softmax_cross_entropy(__UpperCamelCase , onehot(__UpperCamelCase , logits.shape[-1] ) ).mean()
A = -(labels.shape[-1] * loss.item())
A = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 712
|
"""simple docstring"""
from __future__ import annotations
def A__ ( UpperCamelCase , UpperCamelCase ):
A = []
create_all_state(1 , UpperCamelCase , UpperCamelCase , [] , UpperCamelCase )
return result
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(UpperCamelCase , total_number - level + 2 ):
current_list.append(UpperCamelCase )
create_all_state(i + 1 , UpperCamelCase , level - 1 , UpperCamelCase , UpperCamelCase )
current_list.pop()
def A__ ( UpperCamelCase ):
for i in total_list:
print(*UpperCamelCase )
if __name__ == "__main__":
_snake_case : Optional[Any] = 4
_snake_case : Tuple = 2
_snake_case : int = generate_all_combinations(n, k)
print_all_state(total_list)
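# Expected output of the demo above (all 2-element combinations of 1..4,
# derived directly from the recursion): each combination prints on its own line:
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4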
| 524
| 0
|
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self) -> None:
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 147
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
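# A minimal usage sketch (hedged: the vocab size below is illustrative and the
# task name is assumed, neither is taken from this file):
#
#   config = CamembertConfig(vocab_size=32005)
#   onnx_config = CamembertOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes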
| 643
| 0
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 704
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
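# Example invocation (hedged: the script filename and all paths below are
# hypothetical placeholders, not taken from this file):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json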
| 531
| 0
|
"""simple docstring"""
def UpperCAmelCase_ ( __a : int , __a : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_lowerCamelCase : Any = str(bin(__a ) )[2:] # remove the leading "0b"
_lowerCamelCase : int = str(bin(__a ) )[2:] # remove the leading "0b"
_lowerCamelCase : List[str] = max(len(__a ) , len(__a ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__a ) , b_binary.zfill(__a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
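# A couple of worked examples (these follow directly from the definition above):
#   binary_and(25, 32) -> "0b000000"   # 011001 & 100000, zero-padded to 6 bits
#   binary_and(5, 3)   -> "0b001"      # 101 & 011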
| 437
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a_ = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 437
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
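# A minimal usage sketch (hedged: the audio path is a hypothetical placeholder;
# the call signature follows the `inputs`/`outputs` declared above):
#
#   tool = SpeechToTextTool()
#   transcript = tool("path/to/recording.wav")
#   print(transcript)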
| 154
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the ScoreSdeVeScheduler's step function.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas have the same device as the original samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
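# A minimal predictor-corrector sampling sketch with this scheduler (hedged:
# `score_model` is a hypothetical UNet-style score model returning `.sample`;
# the shape and step count are illustrative, not taken from this file):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   scheduler.set_sigmas(num_inference_steps=1000)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           score = score_model(sample, t).sample
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = score_model(sample, t).sample
#       sample = scheduler.step_pred(score, t, sample).prev_sample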
| 154
| 1
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 38
|
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 577
| 0
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a__ : Union[str, Any] = logging.getLogger(__name__)
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bnb_quantization_config.load_in_abit
__SCREAMING_SNAKE_CASE = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
__SCREAMING_SNAKE_CASE = []
# custom device map
if isinstance(_lowercase , _lowercase ) and len(device_map.keys() ) > 1:
__SCREAMING_SNAKE_CASE = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__SCREAMING_SNAKE_CASE = get_keys_to_not_convert(_lowercase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowercase )
__SCREAMING_SNAKE_CASE = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowercase )
# compatibility with peft
__SCREAMING_SNAKE_CASE = load_in_abit
__SCREAMING_SNAKE_CASE = load_in_abit
__SCREAMING_SNAKE_CASE = get_parameter_device(_lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
__SCREAMING_SNAKE_CASE = replace_with_bnb_layers(_lowercase , _lowercase , modules_to_not_convert=_lowercase )
# convert param to the right dtype
__SCREAMING_SNAKE_CASE = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__SCREAMING_SNAKE_CASE = name.replace(".weight" , "" ).replace(".bias" , "" )
__SCREAMING_SNAKE_CASE = getattr(_lowercase , _lowercase , _lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowercase ):
param.to(_lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
__SCREAMING_SNAKE_CASE = replace_with_bnb_layers(
_lowercase , _lowercase , modules_to_not_convert=_lowercase )
__SCREAMING_SNAKE_CASE = get_quantized_model_device_map(
_lowercase , _lowercase , _lowercase , max_memory=_lowercase , no_split_module_classes=_lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
_lowercase , _lowercase , _lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowercase , offload_state_dict=_lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowercase , device_map=_lowercase , offload_dir=_lowercase )
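

# A minimal usage sketch for `load_and_quantize_model` above. Hedged: the model
# id, checkpoint path, and config values are illustrative assumptions, not part
# of this module; `BnbQuantizationConfig` is assumed importable from
# `accelerate.utils`.
def _example_load_and_quantize_8bit():
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig
    from transformers import AutoConfig, AutoModelForCausalLM

    # Instantiate the model with empty (meta) weights so no memory is allocated
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("facebook/opt-350m"))

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    # `weights_location` must point at a folder holding the model checkpoint
    return load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="/path/to/opt-350m-checkpoint",  # hypothetical path
        device_map="auto",
    )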
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            "The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`."
        )

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        # Skipped modules are kept in the (non-quantized) torch_dtype
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        # Modules explicitly kept in full precision stay in float32
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # Get max_memory for each device
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # Check that no quantized module ends up on the CPU or the disk
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit "
                        "the quantized model. If you want to dispatch the model on the CPU or the disk while keeping "
                        "these modules in `torch_dtype`, you need to pass a custom `device_map` to "
                        "`load_and_quantize_model`. Check "
                        "https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk "
                        "for more details."
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be "
                        "converted to 8-bit."
                    )
        del device_map_without_some_modules
    return device_map
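

# Illustrative shapes of `device_map` accepted by the helper above (the module
# names and device indices are examples, not requirements):
#   "auto" / "balanced" / "balanced_low_0" / "sequential"
#       -> resolved via get_balanced_memory / infer_auto_device_map
#   {"": 0}
#       -> place the whole model on GPU 0
#   {"transformer.h.0": 0, "lm_head": "cpu"}
#       -> custom per-module placement (CPU/disk only allowed for 8-bit)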
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
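

# A hedged sketch of what `replace_with_bnb_layers` does on a toy model.
# Assumes `bitsandbytes` is installed and a CUDA device is available; the
# module names ("fc1", "head") are illustrative.
def _example_replace_with_bnb_layers():
    from accelerate.utils import BnbQuantizationConfig

    toy = nn.Sequential()
    toy.add_module("fc1", nn.Linear(16, 16))
    toy.add_module("head", nn.Linear(16, 4))
    config = BnbQuantizationConfig(load_in_8bit=True)
    toy = replace_with_bnb_layers(toy, config, modules_to_not_convert=["head"])
    # toy.fc1 is now a bnb.nn.Linear8bitLt; toy.head remains a plain nn.Linear
    return toy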
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check that the current key is not in `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load a bnb module with empty weights and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model on the meta device; this has zero memory cost
    # since it is done inside the `init_empty_weights` context manager
    with init_empty_weights():
        tied_model = deepcopy(model)

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # Otherwise the model has an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # Add the last module together with the tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # Remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
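

# For a typical causal LM with tied input/output embeddings, the helper above
# usually returns something like ["lm_head"]; this is an illustrative
# expectation rather than a guarantee for every architecture.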
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # If the parameter is not quantized yet, quantize it and offload the
    # quantized weights together with the SCB statistics
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # Offload the weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if len(lowerCAmelCase_ ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
__SCREAMING_SNAKE_CASE = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()