| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
'''simple docstring'''
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 22 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model from the json configuration file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to False.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
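# Example invocation (a sketch; the script name and all paths are hypothetical, the
# flags are the ones defined by the parser above). Note that --tf_checkpoint_path is
# assumed to end in "model.ckpt": the code strips its last 10 characters to locate vocab.txt.
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output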
| 22 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
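# Usage sketch (illustrative; "photo.jpg" is a hypothetical file). BaseImageProcessor
# makes the instance callable, delegating to preprocess():
# from PIL import Image
# processor = BlipImageProcessor()
# pixel_values = processor(images=Image.open("photo.jpg"), return_tensors="pt").pixel_values
# # -> float tensor of shape (1, 3, 384, 384) after resize, rescale and CLIP-style normalization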
| 362 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
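# Example (sketch): the methods above implement the BERT-style special-token layout
#   single sequence: <s> tokens_0 </s>
#   pair:            <s> tokens_0 </s> tokens_1 </s>
# so build_inputs_with_special_tokens([5, 6], [7]) returns
# [cls_token_id, 5, 6, sep_token_id, 7, sep_token_id].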
| 90 | 0 |
"""simple docstring"""
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
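# Consumption sketch (an assumption, not part of this generated table): pins are
# typically joined into pip requirement strings, e.g.
# def deps_list(*pkgs):
#     return [deps[pkg] for pkg in pkgs]
# install_requires = deps_list("numpy", "packaging", "pyyaml", "regex", "requests", "tqdm")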
| 61 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
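# Expected input format (sketch): each line of --correct_filename carries the four
# ";"-separated fields unpacked in main(); the concrete values below are hypothetical:
# tests/models/foo/test_modeling_foo.py;FooModelIntegrationTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])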
| 61 | 1 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
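# Example invocation (a sketch; the script name and paths are hypothetical):
# python convert_umt5_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output \
#     --scalable_attention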
| 226 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
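# Usage sketch (assumed, following how pipeline test mixins typically consume these sets):
# class StableDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#     params = TEXT_TO_IMAGE_PARAMS
#     batch_params = TEXT_TO_IMAGE_BATCH_PARAMS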
| 226 | 1 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt()."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
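# Worked example (sketch): perfect_square_binary_search(16) probes mid = 8 (64 > 16),
# then mid = 3 (9 < 16), then mid = 5 (25 > 16), then mid = 4 (16 == 16) -> True.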
| 259 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear search over array[left:right]."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; falls back to linear search below `precision`."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; falls back to linear search below `precision`."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 325 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
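# Pattern under test (sketch): gradients are summed across calls until applied, e.g.
# accumulator = GradientAccumulator()
# for micro_batch_gradients in gradient_chunks:   # hypothetical iterable
#     accumulator(micro_batch_gradients)
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()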
| 211 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin style) primality test built on bin_exp_mod."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
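# Worked example (sketch): for n = 13, n - 1 = 12 = 3 * 2**2, so d = 3 and exp = 2;
# each of the `prec` rounds draws a random base a in [2, 12] and inspects
# bin_exp_mod(a, 3, 13) and its repeated squares for a composite witness.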
| 211 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
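# Example (sketch): MBart-50 formats every sequence as [lang_code] X [eos], so with
# src_lang="en_XX" an encoded sentence becomes: [en_XX] "UN Chief Says ..." </s>,
# which is exactly what set_src_lang_special_tokens() arranges above.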
| 283 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
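# Usage sketch (illustrative): a width-scaled variant of the architecture:
# config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
# assert config.model_type == "mobilenet_v2"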
| 283 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
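# Example invocation (a sketch; the script name and paths are hypothetical):
# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path /path/to/xlmr.xl \
#     --pytorch_dump_folder_path /path/to/output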
| 131 |
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
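# Math behind the sketch (Project Euler 135): with x = a + d, y = a, z = a - d,
# x**2 - y**2 - z**2 = a * (4d - a) = n. For each divisor a ("first_term") of n,
# d = (a + n/a) / 4 must be a positive integer with a > d and a < 4d, which is
# exactly what the divisibility and range checks above enforce.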
| 131 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _SCREAMING_SNAKE_CASE :
def __lowerCAmelCase ( self , __A , __A , __A ) -> List[str]:
return None
class _SCREAMING_SNAKE_CASE :
def __lowerCAmelCase ( self , __A , __A , __A , __A ) -> int:
return None
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
UpperCAmelCase_ :Tuple = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __lowerCAmelCase ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__A , """tf""" , 12 , **__A )
@require_torch
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__A , """pt""" , 12 , **__A )
@require_torch
@slow
def __lowerCAmelCase ( self ) -> str:
from transformers import BertModel
lowerCAmelCase_ :Tuple = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__A ) )
vocab_file.flush()
lowerCAmelCase_ :Optional[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCAmelCase_ :List[str] = BertModel(BertConfig(vocab_size=len(__A ) ) )
model.save_pretrained(__A )
self._test_export(__A , """pt""" , 12 , __A )
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 84 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale,
                class_labels=None, timestep_cond=None, attention_mask=None, cross_attention_kwargs=None,
                guess_mode=False, return_dict=True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels,
                timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(self, save_directory, is_main_process=True, save_function=None,
                        safe_serialization=False, variant=None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function,
                safe_serialization=safe_serialization, variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
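# A hedged usage sketch: wrapping two ControlNets and saving them in the layout
# from_pretrained expects. The checkpoint names are illustrative placeholders,
# not identifiers endorsed by this module:
#
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([canny, pose])
#   multi.save_pretrained("./my_multi_controlnet/controlnet")  # second net lands in controlnet_1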
| 84 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
                 hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4],
                 hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2,
                 drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 116 |
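# A small instantiation sketch for the VanConfig above (the overridden values
# are made up for illustration, not an official checkpoint configuration):
#
#   config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
#   assert config.model_type == "van"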
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 116 | 1 |
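# For reference, a hedged sketch of how the offline behaviour tested above is
# usually triggered outside the test suite (shell usage, illustrative only):
#
#   HF_DATASETS_OFFLINE=1 python my_script.py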
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowercase ( ):
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
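# Worked check of the closed form, independent of the code above:
# with a = 1, d = 1, n = 10 the series is 1 + 2 + ... + 10, and
# S = n/2 * (2a + (n - 1)d) = 10/2 * (2 + 9) = 5 * 11 = 55.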
| 260 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
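# Example invocation, as a hedged sketch (the script filename and all paths are
# placeholders, not taken from this file):
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava_full.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf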
| 260 | 1 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only,
            in_chans=config.num_channels, out_indices=out_indices, **kwargs,
        )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)

        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Empty init-weights function to keep the class compatible with the library.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 364 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 256 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None,
                 pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ",
                 doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768,
                 retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed",
                 index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False,
                 label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False,
                 output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
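# A hedged composition sketch using the classmethod above (the two checkpoint
# identifiers are illustrative choices, not mandated by this config class):
#
#   from transformers import AutoConfig
#   question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5
#   )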
| 22 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 310 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 364 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 68 | 0 |
"""simple docstring"""
A_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
A_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def UpperCAmelCase__ (snake_case__ : dict[int, list[int]] , snake_case__ : int , snake_case__ : list[bool] ):
"""simple docstring"""
_snake_case : Union[str, Any] = True
_snake_case : List[Any] = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(snake_case__ , snake_case__ , snake_case__ )
order.append(snake_case__ )
return order
def UpperCAmelCase__ (snake_case__ : dict[int, list[int]] , snake_case__ : int , snake_case__ : list[bool] ):
"""simple docstring"""
_snake_case : Dict = True
_snake_case : Dict = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(snake_case__ , snake_case__ , snake_case__ )
return component
def UpperCAmelCase__ (snake_case__ : dict[int, list[int]] ):
"""simple docstring"""
_snake_case : Any = len(snake_case__ ) * [False]
_snake_case : dict[int, list[int]] = {vert: [] for vert in range(len(snake_case__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(snake_case__ )
_snake_case : Optional[Any] = []
for i, was_visited in enumerate(snake_case__ ):
if not was_visited:
order += topology_sort(snake_case__ , snake_case__ , snake_case__ )
_snake_case : int = []
_snake_case : Dict = len(snake_case__ ) * [False]
for i in range(len(snake_case__ ) ):
_snake_case : Union[str, Any] = order[len(snake_case__ ) - i - 1]
if not visited[vert]:
_snake_case : Tuple = find_components(snake_case__ , snake_case__ , snake_case__ )
components_list.append(snake_case__ )
return components_list
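# Illustrative check (the grouping is a property of the graphs above; the exact
# ordering inside each component depends on traversal order):
#
#   strongly_connected_components(test_graph_1)
#   # vertices 0, 1, 2 form one component; 3 and 4 are singletons
#   strongly_connected_components(test_graph_2)
#   # vertices 0, 1, 2 form one component and 3, 4, 5 another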
| 64 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
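# Greedy longest-match-first behaviour of the tokenizer above, illustrated on a
# toy vocabulary (the vocabulary and indices are made up for the example):
#
#   wp = WordpieceTokenizer(vocab={"ab": 0, "abc": 1}, unk_token="<unk>")
#   wp.tokenize("abc")  # ["abc"]  (the longest match wins)
#   wp.tokenize("abd")  # ["ab", "<unk>"]  ("d" is not in the vocabulary)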
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
def __init__( self , A_ , A_="<d>" , A_="</d>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="</n>" , A_="</_>" , A_="left" , **A_ , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=A_ , eod_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , unk_token=A_ , line_token=A_ , space_token=A_ , padding_side=A_ , **A_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(A_ )
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : x[1] ) )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = []
for x in jieba.cut(A_ , cut_all=A_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(A_ ) )
return output_tokens
def __UpperCamelCase ( self , A_ , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(A_ , **A_ )
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
return token in self.encoder
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return "".join(A_ )
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(A_ ):
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
UpperCamelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['\n']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : x[1] ) )
with open(A_ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
UpperCamelCase = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ ))
return [1] + ([0] * len(A_ ))
| 222 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8,
                 enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8,
                 dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
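# A minimal instantiation sketch for the config above (the overridden value is
# illustrative only):
#
#   config = BertAbsConfig(dec_layers=8)
#   assert config.model_type == "bertabs" and config.dec_layers == 8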
| 358 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1_536,
'''junnyu/roformer_chinese_base''': 1_536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get('lowercase', do_lower_case) != do_lower_case
            or pre_tok_state.get('strip_accents', strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop('type'))
            pre_tok_state['lowercase'] = do_lower_case
            pre_tok_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        # Make sure we correctly set the custom Jieba PreTokenizer (as in the upstream RoFormerTokenizerFast)
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
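# A minimal usage sketch (not part of the original file). Loading this fast
# tokenizer needs the `rjieba` package for the custom Jieba pre-tokenizer, and
# downloads one of the checkpoints listed in PRETRAINED_VOCAB_FILES_MAP above.
if __name__ == "__main__":
    demo_tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(demo_tokenizer.tokenize("今天天气非常好。"))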
| 216 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto the first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto the first `dimensions` discriminant axes."""
    # The desired dimensionality must be smaller than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
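# Worked example (not part of the original tests): project a tiny 3-feature,
# 2-class dataset onto a single discriminant axis. `features` is laid out as
# (n_features, n_samples) and `labels` has one entry per column/sample.
def demo_linear_discriminant_analysis() -> None:
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    projected = linear_discriminant_analysis(features, labels, classes=2, dimensions=1)
    print(projected.shape)  # (1, 5): one discriminant axis, five samples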
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 76 |
def is_even(number: int) -> bool:
    """
    Return True if `number` is even. A number is even exactly when its least
    significant bit is 0, so `number & 1` evaluates to 0 for even inputs.

    >>> is_even(2)
    True
    >>> is_even(7)
    False
    >>> is_even(0)
    True
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        """simple docstring"""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
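# Worked example of the aspect-ratio arithmetic in get_expected_values above
# (not part of the original tests): with size {"shortest_edge": 18}, a 30x40
# (h x w) image keeps its aspect ratio, so the short side becomes 18 and the
# long side becomes int(18 * 40 / 30) = 24.
def _demo_expected_resize(h: int = 30, w: int = 40, shortest_edge: int = 18) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge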
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 122 | 0 |
from __future__ import annotations
class Node:
    '''simple docstring'''

    def __init__(self, data=None):
        '''simple docstring'''
        self.data = data
        self.next = None

    def __repr__(self):
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list) -> Node:
    '''simple docstring'''
    if not elements_list:
        raise Exception('''The Elements List is empty''')
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node: Node) -> None:
    '''simple docstring'''
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
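# A non-recursive variant (illustrative, not part of the original file):
# print_reverse recurses once per node, so lists longer than Python's default
# recursion limit (~1000 frames) would raise RecursionError. Buffering the
# values first avoids that.
def print_reverse_iterative(head_node: Node) -> None:
    values = []
    while head_node is not None:
        values.append(head_node.data)
        head_node = head_node.next
    for value in reversed(values):
        print(value)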
def main() -> None:
    '''simple docstring'''
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('''Linked List:''')
    print(linked_list)
    print('''Elements in Reverse:''')
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 226 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    '''simple docstring'''
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    '''simple docstring'''
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
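# A self-contained smoke test (not part of the original file) for when the
# sample image below is missing: run the filter on a synthetic gradient. Note
# that gen_gaussian_kernel is not normalized to sum to 1 (the prefactor is
# 1/(2*pi*sigma), not 1/(2*pi*sigma^2), and the kernel tails are truncated),
# so the output is slightly darker than the input before the uint8 cast.
def _demo_gaussian_filter():
    from numpy import arange, tile

    fake_gray = tile(arange(64, dtype="uint8"), (64, 1))  # 64x64 horizontal ramp
    return gaussian_filter(fake_gray, 3, sigma=1)  # shape (62, 62)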
if __name__ == "__main__":
# read original image
__A =imread(R"../image_data/lena.jpg")
# turn image in gray scale value
__A =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__A =gaussian_filter(gray, 3, sigma=1)
__A =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
| 226 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files.'})
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'})
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'})
    norm_pix_loss: bool = field(
        default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'})
@dataclass
class CustomTrainingArguments(TrainingArguments):
    '''simple docstring'''

    base_learning_rate: float = field(
        default=1e-3, metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'})
def collate_fn(examples):
    """simple docstring"""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("""You are instantiating a new config instance from scratch.""")
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("""Training new model from scratch""")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
A__ : str =image_processor.size["""shortest_edge"""]
else:
A__ : List[str] =(image_processor.size["""height"""], image_processor.size["""width"""])
A__ : Any =Compose(
[
Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__lowerCAmelCase, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std ),
] )
def preprocess_images(__snake_case : Any ):
A__ : Optional[int] =[transforms(__lowerCAmelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 362 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function='gelu_pytorch_tanh',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
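# A minimal usage sketch (not part of the original file): multi_query=True (the
# default) selects the single key/value-head attention used by the
# SantaCoder/StarCoder checkpoints.
if __name__ == "__main__":
    demo_config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
    print(demo_config.multi_query, demo_config.hidden_size)  # True 128 (hidden_size maps to n_embd)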
| 136 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        """simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertTrue(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
| 211 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 1 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    '''simple docstring'''

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    '''simple docstring'''

    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def snake_case ( self : str, lowerCamelCase : List[Any], lowerCamelCase : List[str]="", lowerCamelCase : int=None, **lowerCamelCase : List[Any] )-> List[Any]:
lowerCamelCase__ : List[Any] =self.tokenizer(
prefix + prompt_text, padding=lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=self.framework )
lowerCamelCase__ : Optional[int] =prompt_text
if handle_long_generation == "hole":
lowerCamelCase__ : Union[str, Any] =inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCamelCase__ : int =generate_kwargs['''max_new_tokens''']
else:
lowerCamelCase__ : Dict =generate_kwargs.get('''max_length''', self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCamelCase__ : int =self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
lowerCamelCase__ : Tuple =inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
lowerCamelCase__ : List[str] =inputs['''attention_mask'''][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
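# Minimal usage sketch (editor addition): exercises the pipeline above through the
# `transformers.pipeline` factory. The checkpoint name "sshleifer/tiny-gpt2" and the
# prompt are illustrative assumptions, not part of the original file.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=5)[0]["generated_text"])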
| 272 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns a matrix of sinusoids at different frequencies, one row per timestep."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    """Time-step embedding module: two Dense layers with a silu in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps `get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
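# Minimal usage sketch (editor addition): embed a small batch of timesteps with the
# function above; the batch size and embedding width are illustrative assumptions.
if __name__ == "__main__":
    ts = jnp.arange(4, dtype=jnp.float32)  # shape (4,)
    print(get_sinusoidal_embeddings(ts, embedding_dim=8).shape)  # (4, 8)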
| 272 | 1 |
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation of 0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
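    # Editor addition: a direct demonstration next to the doctests; the arguments
    # are illustrative.
    print(kth_permutation(10, 4))  # -> [1, 3, 0, 2]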
| 131 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    """Read four bytes as a big-endian unsigned 32-bit integer."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
    with gfile.GFile(filepath) as f:
        size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True,
    validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = "Validation size should be between 0 and " f"{len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
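# Minimal usage sketch (editor addition): download MNIST into a scratch directory and
# pull one batch; the directory name and batch size are illustrative assumptions.
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    batch_images, batch_labels = mnist.train.next_batch(32)
    print(batch_images.shape, batch_labels.shape)  # (32, 784) (32, 10)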
| 131 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """Construct a T5 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100,
        additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, legacy=True, **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy, **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
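# Minimal usage sketch (editor addition): round-trip a sentence through the tokenizer
# above via the published "t5-small" checkpoint; the sample text is an illustrative
# assumption.
if __name__ == "__main__":
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    ids = tokenizer("translate English to German: Hello, world!").input_ids
    print(ids)
    print(tokenizer.decode(ids, skip_special_tokens=True))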
| 353 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
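    # Editor addition: a small demonstration of the Fenwick tree above; the sample
    # array is an illustrative assumption.
    f = FenwickTree([1, 2, 3, 4, 5])
    print(f.prefix(3))    # 1 + 2 + 3 -> 6
    f.add(1, 10)          # logical array becomes [1, 12, 3, 4, 5]
    print(f.query(1, 4))  # 12 + 3 + 4 -> 19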
| 347 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
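# Editor note (hedged): the test above is gated behind @slow, so it is normally run
# through pytest with slow tests enabled, e.g. `RUN_SLOW=1 pytest <path-to-this-file>`;
# the exact file path is not shown here and is left unspecified.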
| 116 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CamemBERT model."""

    model_type = "camembert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
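# Minimal usage sketch (editor addition): instantiate a default config and inspect a
# few of its fields.
if __name__ == "__main__":
    config = CamembertConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # camembert 768 12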
| 116 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] ,)
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] ,)
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] ,)
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] ,)
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] ,)
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] ,)
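# Minimal usage sketch (editor addition): the pipeline exercised by the tests above,
# run directly; the image URL and labels mirror the test fixtures.
if __name__ == "__main__":
    detector = pipeline("zero-shot-object-detection")
    preds = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        top_k=3,
    )
    for p in preds:
        print(p["label"], p["score"], p["box"])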
| 107 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, is_hybrid=self.is_hybrid,
            backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
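# Minimal usage sketch (editor addition): the same checkpoint through the
# depth-estimation pipeline; the checkpoint name matches the integration test above
# and the image URL is an illustrative assumption.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result["predicted_depth"].shape)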
| 107 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
A = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    readme = A  # the README text assembled in the long f-string above
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
 | 74 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder

MODEL = "base_with_context"
def load_notes_encoder(weights, model):
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
    _UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        attention_weights = ly_weight["attention"]
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder(weights, model):
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
    _UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder(weights, model):
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    _UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight["self_attention"]
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = ly_weight['''MultiHeadDotProductAttention_0''']
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def lowercase ( a__ : Union[str, Any] ) -> int:
_UpperCamelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_UpperCamelCase = jnp.tree_util.tree_map(onp.array , a__ )
_UpperCamelCase = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
_UpperCamelCase = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
_UpperCamelCase = inference.parse_training_gin_file(a__ , a__ )
_UpperCamelCase = inference.InferenceModel(args.checkpoint_path , a__ )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
_UpperCamelCase = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
_UpperCamelCase = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
_UpperCamelCase = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
_UpperCamelCase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , a__ )
_UpperCamelCase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , a__ )
_UpperCamelCase = load_decoder(ta_checkpoint['''target''']['''decoder'''] , a__ )
_UpperCamelCase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
_UpperCamelCase = SpectrogramDiffusionPipeline(
notes_encoder=a__ , continuous_encoder=a__ , decoder=a__ , scheduler=a__ , melgan=a__ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
UpperCAmelCase = parser.parse_args()
main(args)
| 256 | 0 |
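# A minimal, self-contained sketch of the weight-porting pattern used throughout the
# conversion functions above: Flax stores dense kernels as [in_features, out_features]
# arrays, while torch.nn.Linear.weight is [out_features, in_features], hence the .T on
# every kernel before wrapping it in nn.Parameter. The shapes below are made up.
import numpy as np
import torch
from torch import nn

flax_kernel = np.random.randn(16, 32).astype(np.float32)  # Flax layout: [in, out]
layer = nn.Linear(16, 32, bias=False)                      # torch layout: [out, in]
layer.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))
assert tuple(layer.weight.shape) == (32, 16)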
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str]=13 , SCREAMING_SNAKE_CASE : Dict=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : List[Any]=4 , SCREAMING_SNAKE_CASE : Union[str, Any]=37 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : Tuple=10 , SCREAMING_SNAKE_CASE : str=0.02 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Any=2 , ):
lowercase__ : Tuple = parent
lowercase__ : Dict = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Dict = num_channels
lowercase__ : Union[str, Any] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Tuple = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : int = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Any = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : Tuple = scope
lowercase__ : int = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase__ : Union[str, Any] = (image_size // patch_size) ** 2
lowercase__ : str = num_patches + 2
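# e.g. with the defaults above (image_size=30, patch_size=2):
# (30 // 2) ** 2 = 225 patches, plus the [CLS] and distillation tokens -> seq_length == 227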
def snake_case ( self : Tuple ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Optional[int] = DeiTModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Any = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : str = DeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : str = 1
lowercase__ : Optional[Any] = DeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : str = DeiTForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : int = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : str = 1
lowercase__ : Optional[Any] = DeiTForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Any = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : str ):
lowercase__ : int = DeiTModelTester(self )
lowercase__ : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : int ):
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any=False ):
lowercase__ : int = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case ( self : str ):
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ : Dict = False
lowercase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE ),
*get_values(SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
lowercase__ : Any = problem_type["title"]
lowercase__ : int = problem_type["num_labels"]
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
lowercase__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
lowercase__ : int = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as warning_list:
lowercase__ : int = model(**SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def snake_case ( self : Tuple ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = DeiTModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : int ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def snake_case ( self : int ):
lowercase__ : Dict = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : int = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def snake_case ( self : List[str] ):
lowercase__ : Union[str, Any] = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
lowercase__ : Any = self.default_image_processor
lowercase__ : List[str] = prepare_img()
lowercase__ : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" )
lowercase__ : List[str] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
| 121 |
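# A minimal usage sketch of the inference path the slow test above exercises, assuming
# network access to the facebook/deit-base-distilled-patch16-224 checkpoint and the
# local COCO fixture image:
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
model.eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits  # shape (1, 1000), averaging the cls and distillation heads
print(logits.argmax(-1).item())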
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=100 , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : str=30 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : Dict=32 , SCREAMING_SNAKE_CASE : str=5 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Any="gelu" , SCREAMING_SNAKE_CASE : Tuple=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=10 , SCREAMING_SNAKE_CASE : Dict=0.02 , SCREAMING_SNAKE_CASE : Any=3 , ):
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = vocab_size
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Tuple = num_channels
lowercase__ : Any = is_training
lowercase__ : str = use_labels
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Optional[int] = intermediate_size
lowercase__ : int = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : int = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : str = (image_size // patch_size) ** 2
lowercase__ : List[str] = num_patches + 1
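# e.g. with the defaults above (image_size=30, patch_size=2):
# (30 // 2) ** 2 = 225 patches, plus the [CLS] token -> seq_length == 226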
def snake_case ( self : Tuple ):
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Union[str, Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : int = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] ):
lowercase__ : Optional[Any] = FlaxBeitModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : int = FlaxBeitForMaskedImageModeling(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Optional[int] = FlaxBeitForImageClassification(config=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : int = 1
lowercase__ : List[str] = FlaxBeitForImageClassification(SCREAMING_SNAKE_CASE )
lowercase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def snake_case ( self : Any ):
lowercase__ : List[Any] = FlaxBeitModelTester(self )
lowercase__ : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : int ):
self.config_tester.run_common_tests()
def snake_case ( self : int ):
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : str = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
return model(pixel_values=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
with self.subTest("JIT Enabled" ):
lowercase__ : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase__ : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : int ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowercase__ : Any = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
lowercase__ : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@require_flax
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : int ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
lowercase__ : Tuple = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
lowercase__ : int = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : str = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" ).pixel_values
# prepare bool_masked_pos
lowercase__ : Optional[Any] = np.ones((1, 196) , dtype=SCREAMING_SNAKE_CASE )
# forward pass
lowercase__ : Any = model(pixel_values=SCREAMING_SNAKE_CASE , bool_masked_pos=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = outputs.logits
# verify the logits
lowercase__ : List[str] = (1, 196, 8_192)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Dict = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 ) )
@slow
def snake_case ( self : Any ):
lowercase__ : Union[str, Any] = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : List[Any] = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
# forward pass
lowercase__ : str = model(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = outputs.logits
# verify the logits
lowercase__ : List[str] = (1, 1_000)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase__ : str = 281
self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : str ):
lowercase__ : List[Any] = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
lowercase__ : Dict = self.default_image_processor
lowercase__ : Dict = prepare_img()
lowercase__ : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
# forward pass
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = outputs.logits
# verify the logits
lowercase__ : int = (1, 21_841)
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : str = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase__ : Union[str, Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE )
| 121 | 1 |
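# A standalone toy version of the jit-vs-eager consistency check performed in the Flax
# test above (no model weights involved; only requires jax to be installed):
import jax
import jax.numpy as jnp

fn = jax.jit(lambda x: jnp.tanh(x) * 2.0)
x = jnp.ones((2, 3))
with jax.disable_jit():
    eager = fn(x)
jitted = fn(x)
assert jitted.shape == eager.shape
assert bool(jnp.allclose(jitted, eager))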
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a : Dict = None
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a : str = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a : List[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class a ( _lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = TaTokenizer
snake_case_ = []
def __init__( self : List[Any] , lowercase_ : int=None , lowercase_ : Dict=None , lowercase_ : Dict="</s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : int="<pad>" , lowercase_ : int=100 , lowercase_ : List[Any]=None , **lowercase_ : List[str] , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [F"<extra_id_{i}>" for i in range(lowercase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
snake_case_ = len(set(filter(lambda lowercase_ : bool('''extra_id_''' in str(lowercase_ ) ) , lowercase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
snake_case_ = extra_ids
@staticmethod
def A_ ( lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : int ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
snake_case_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , lowercase_ , )
return max_model_length
def A_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
logger.info(F"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def A_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
snake_case_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
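# e.g. for the pair ([a1, a2], [b1]) this returns [a1, a2, </s>, b1, </s>];
# self.prefix_tokens is the empty class-level list above, so nothing is prepended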
def A_ ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A_ ( self : Dict ):
return list(
set(filter(lambda lowercase_ : bool(re.search(R'''<extra_id_\d+>''' , lowercase_ ) ) is not None , self.additional_special_tokens ) ) )
def A_ ( self : Any ):
return [self.convert_tokens_to_ids(lowercase_ ) for token in self.get_sentinel_tokens()]
| 56 |
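# A minimal usage sketch of the sentinel-token machinery defined above, assuming the
# t5-small checkpoint can be downloaded:
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")    # extra_ids defaults to 100
sentinels = tok.get_sentinel_tokens()                # the 100 "<extra_id_*>" tokens
assert len(sentinels) == 100
print(tok("fill in <extra_id_0> please").input_ids)  # each sentinel maps to a single id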
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 68 | 0 |
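# A toy sketch of the lazy-import idea behind _LazyModule above: heavy submodules are
# imported only when one of their declared names is first accessed. This stand-in uses
# importlib directly and is not the real transformers implementation:
import importlib

_import_structure = {"json": ["dumps", "loads"]}

def lazy_attr(name):
    for module_name, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

print(lazy_attr("dumps")({"lazy": True}))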
UpperCAmelCase_ : Dict = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
UpperCAmelCase_ : Any = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
UpperCAmelCase_ : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 62 |
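# A sketch of how constants like these are typically consumed when docs are converted
# to notebooks: the install cell is prepended and the brace placeholders substituted.
# The substitution loop below is a hypothetical stand-in, not the real doc-builder code:
template = "processor = {processor_class}.from_pretrained(checkpoint)"
replacements = {"{processor_class}": "FakeProcessorClass"}
for placeholder, value in replacements.items():
    template = template.replace(placeholder, value)
assert template == "processor = FakeProcessorClass.from_pretrained(checkpoint)"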
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ):
snake_case__ : Dict = StableDiffusionControlNetImgaImgPipeline
snake_case__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
snake_case__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
snake_case__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _A ( self : List[str] ):
torch.manual_seed(0 )
UpperCamelCase :str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCamelCase :str = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCamelCase :List[str] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
UpperCamelCase :Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase :Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase :Union[str, Any] = CLIPTextModel(__lowerCamelCase )
UpperCamelCase :Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase :Any = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=0 ):
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Optional[Any] = 2
UpperCamelCase :Optional[int] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , )
UpperCamelCase :Tuple = floats_tensor(control_image.shape , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
UpperCamelCase :str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase :Optional[Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
UpperCamelCase :str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def _A ( self : Dict ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _A ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : Optional[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
snake_case__ : Optional[Any] = StableDiffusionControlNetImgaImgPipeline
snake_case__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _A ( self : List[Any] ):
torch.manual_seed(0 )
UpperCamelCase :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__lowerCamelCase : Union[str, Any] ):
if isinstance(__lowerCamelCase , torch.nn.Convad ):
torch.nn.init.normal(__lowerCamelCase.weight )
__lowerCamelCase.bias.data.fill_(1.0 )
UpperCamelCase :Union[str, Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__lowerCamelCase )
torch.manual_seed(0 )
UpperCamelCase :Tuple = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__lowerCamelCase )
torch.manual_seed(0 )
UpperCamelCase :str = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
UpperCamelCase :Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase :Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase :List[Any] = CLIPTextModel(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
UpperCamelCase :int = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _A ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=0 ):
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :Dict = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Any = 2
UpperCamelCase :List[str] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , ),
]
UpperCamelCase :int = floats_tensor(control_image[0].shape , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
UpperCamelCase :List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase :Union[str, Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
UpperCamelCase :Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def _A ( self : List[str] ):
UpperCamelCase :Union[str, Any] = self.get_dummy_components()
UpperCamelCase :List[str] = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
UpperCamelCase :Optional[Any] = 10.0
UpperCamelCase :str = 4
UpperCamelCase :Optional[int] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase :str = steps
UpperCamelCase :Tuple = scale
UpperCamelCase :List[str] = pipe(**__lowerCamelCase )[0]
UpperCamelCase :Optional[int] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase :List[Any] = steps
UpperCamelCase :str = scale
UpperCamelCase :int = pipe(**__lowerCamelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCamelCase :List[str] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase :Optional[Any] = steps
UpperCamelCase :str = scale
UpperCamelCase :Any = pipe(**__lowerCamelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCamelCase :Tuple = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = steps
UpperCamelCase :str = scale
UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def _A ( self : Any ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _A ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : Dict ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def _A ( self : Any ):
UpperCamelCase :List[str] = self.get_dummy_components()
UpperCamelCase :List[str] = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__lowerCamelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : List[str] ):
UpperCamelCase :Tuple = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
UpperCamelCase :List[Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCamelCase , controlnet=__lowerCamelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCamelCase :Optional[int] = """evil space-punk bird"""
UpperCamelCase :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
UpperCamelCase :List[str] = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
UpperCamelCase :str = pipe(
__lowerCamelCase , __lowerCamelCase , control_image=__lowerCamelCase , generator=__lowerCamelCase , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
UpperCamelCase :int = output.images[0]
assert image.shape == (512, 512, 3)
UpperCamelCase :Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 62 | 1 |
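# A minimal end-to-end sketch of the pipeline exercised by the slow test above, written
# with the real diffusers class names rather than the placeholder spellings used in this
# row (needs a CUDA GPU, accelerate, and network access):
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()
image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
control = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
result = pipe(
    "evil space-punk bird", image, control_image=control, num_inference_steps=50, strength=0.6
).images[0]
result.save("bird_punk.png")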
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCAmelCase_ : Union[str, Any] = """<<<<<<< This should probably be modified because it mentions: """
UpperCAmelCase_ : Optional[Any] = """=======
>>>>>>>
"""
UpperCAmelCase_ : Union[str, Any] = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
UpperCAmelCase_ : List[str] = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def _A (__a ) -> Optional[int]:
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowercase_ : ArgumentParser):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=lowercase_ , required=lowercase_ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=lowercase_ , required=lowercase_ , help='''Path to the HuggingFace Datasets folder.''')
train_parser.set_defaults(func=lowercase_)
def __init__( self : Union[str, Any] , lowercase_ : str , lowercase_ : str , *lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = get_logger('''datasets-cli/converting''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = tfds_path
SCREAMING_SNAKE_CASE_ : Tuple = datasets_directory
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
if os.path.isdir(self._tfds_path):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
SCREAMING_SNAKE_CASE_ : List[str] = os.path.dirname(self._tfds_path)
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''')
SCREAMING_SNAKE_CASE_ : Any = os.path.abspath(self._datasets_directory)
self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')
SCREAMING_SNAKE_CASE_ : Optional[int] = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
if os.path.isdir(self._tfds_path):
SCREAMING_SNAKE_CASE_ : List[str] = os.listdir(lowercase_)
else:
SCREAMING_SNAKE_CASE_ : Any = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(F'Looking at file {f_name}')
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = os.path.join(lowercase_ , lowercase_)
if not os.path.isfile(lowercase_) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''')
continue
with open(lowercase_ , encoding='''utf-8''') as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : List[str] = []
for line in lines:
SCREAMING_SNAKE_CASE_ : List[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
SCREAMING_SNAKE_CASE_ : List[str] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''''''
continue
elif "from absl import logging" in out_line:
SCREAMING_SNAKE_CASE_ : str = '''from datasets import logging\n'''
elif "getLogger" in out_line:
SCREAMING_SNAKE_CASE_ : List[str] = out_line.replace('''getLogger''' , '''get_logger''')
elif any(expression in out_line for expression in TO_HIGHLIGHT):
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Dict = list(filter(lambda lowercase_: e in out_line , lowercase_))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase_) + '''\n''')
out_lines.append(lowercase_)
out_lines.append(lowercase_)
continue
else:
for pattern, replacement in TO_CONVERT:
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(lowercase_ , lowercase_ , lowercase_)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
SCREAMING_SNAKE_CASE_ : Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , lowercase_)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(''','''))
SCREAMING_SNAKE_CASE_ : str = '''from . import ''' + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'Error converting {out_line.strip()}')
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' , '''''')
                output_dir = os.path.join(abs_datasets_path , dataset_name)
                output_file = os.path.join(output_dir , f_name)
                os.makedirs(output_dir , exist_ok=True)
                self._logger.info(F'Adding directory {output_dir}')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
                utils_files.append(output_file)
if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file , '''w''' , encoding='''utf-8''') as f:
                f.writelines(out_lines)
self._logger.info(F'Converted in {output_file}')
for utils_file in utils_files:
try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''')]
                self._logger.info(F'Moving {utils_file} to {dest_folder}')
                shutil.copy(utils_file , dest_folder)
except KeyError:
self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.')
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.')
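# Illustrative only: the TO_CONVERT table consumed above is a list of
# (regex, replacement) pairs defined earlier in this file. A minimal sketch of
# what such a pair table can look like (the patterns below are hypothetical,
# not the exact ones shipped with the CLI):
#
#   TO_CONVERT = [
#       (r"tfds\.core\.BuilderConfig", "datasets.BuilderConfig"),
#       (r"tfds\.features\.Text\(\)", "datasets.Value('string')"),
#       (r"tfds\.testing", "datasets.testing"),
#   ]
#
# Each source line is run through every pair via re.sub, so ordering matters
# when patterns overlap.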
| 91 |
from __future__ import annotations
from typing import Any
class UpperCamelCase__ :
    def __init__(self , num_of_nodes : int ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self , u_node : int , v_node : int , weight : int ):
        self.m_edges.append([u_node, v_node, weight] )
    def find_component(self , u_node : int ):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component(self , u_node : int ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union(self , component_size : list[int] , u_node : int , v_node : int ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka(self ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def __UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
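# A minimal usage sketch for the graph class above (assuming the deobfuscated
# API: the constructor takes the node count, add_edge(u, v, weight) registers
# an undirected edge, and boruvka() prints the chosen edges and MST weight):
#
#   g = UpperCamelCase__(3)   # class name kept as in this snippet
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 2)
#   g.add_edge(0, 2, 3)
#   g.boruvka()               # expected MST weight: 5 (edges 1-2 and 0-2)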
| 216 | 0 |
def print_max_activities( start : list[int] , finish : list[int] ):
    '''simple docstring'''
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of the previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
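# Worked trace for the arrays above (activities are ordered by finish time):
# select 0 (finish=2); start[1]=3 >= 2, select 1 (finish=4); start[2]=0 < 4,
# skip; start[3]=5 >= 4, select 3 (finish=7); start[4]=8 >= 7, select 4;
# start[5]=5 < 9, skip. Printed output: 0,1,3,4,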
| 145 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCAmelCase_ : str = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2 )
UpperCAmelCase_ : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
for example in examples:
UpperCAmelCase_ : str = video_classifier(_UpperCamelCase )
self.assertEqual(
_UpperCamelCase , [
{'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )},
{'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )},
] , )
@require_torch
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCAmelCase_ : Optional[Any] = VideoMAEFeatureExtractor(
size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} )
UpperCAmelCase_ : str = pipeline(
'video-classification' , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4 )
UpperCAmelCase_ : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCAmelCase_ : List[str] = video_classifier(_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
UpperCAmelCase_ : Tuple = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def __UpperCAmelCase ( self ) -> Dict:
pass
| 145 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo , pytorch_dump_folder_path ) -> None:
    '''simple docstring'''
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
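# Example invocation (the repo id below is the one cited in the --checkpoint-repo
# help text; the script name and output path are illustrative):
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted
#
# Note that argparse maps the dashed flag --checkpoint-repo to args.checkpoint_repo.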
| 295 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput ( BaseOutput ):
    latents: torch.FloatTensor
class VQModel ( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (6_4,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 3_2 , num_vq_embeddings = 2_5_6 , norm_num_groups = 3_2 , vq_embed_dim = None , scaling_factor = 0.18_215 , norm_type = "group" , ):
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x , return_dict = True ):
        """simple docstring"""
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True ):
        """simple docstring"""
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quanta = self.post_quant_conv(quant )
        dec = self.decoder(quanta , quant if self.config.norm_type == """spatial""" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , return_dict = True ):
        """simple docstring"""
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
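# A minimal round-trip sketch (assuming this is a diffusers VQModel-style API
# with the default 3-channel, group-norm configuration above):
#
#   import torch
#   model = VQModel()
#   sample = torch.randn(1, 3, 32, 32)
#   latents = model.encode(sample).latents   # pre-quantization latents
#   recon = model.decode(latents).sample     # quantize + decode
#   assert recon.shape == sample.shape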
| 122 | 0 |
def solution() -> int:
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = ''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9_999] )
* int(constant[99_999] )
* int(constant[999_999] )
)
if __name__ == "__main__":
print(solution())
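# Sanity check: the digits of Champernowne's constant at positions 1, 10, 100,
# 1000, 10000, 100000 and 1000000 are 1, 1, 5, 3, 7, 2 and 1 respectively, so
# the product printed above is 210 (the well-known Project Euler 40 result).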
| 245 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits = 3 ) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits ,str ):
        raise TypeError('number of qubits must be an integer.' )
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('number of qubits must be an exact integer.' )
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate (>10).' )
    qr = QuantumRegister(number_of_qubits ,'qr' )
    cr = ClassicalRegister(number_of_qubits ,'cr' )
    quantum_circuit = QuantumCircuit(qr ,cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) ,j ,counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k ,number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr ,cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator' )
    job = execute(quantum_circuit ,backend ,shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
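# Applied to the all-zeros initial state, the QFT produces a uniform
# superposition, so with 3 qubits and 10000 shots each of the 8 basis states
# should appear roughly 10000 / 8 = 1250 times (up to sampling noise).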
| 245 | 1 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME ='''naver-clova-ix/donut-base'''
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self : Dict ):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json( self : List[Any] ):
        expected_json = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
        sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
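# token2json inverts Donut's XML-like serialization: <s_key>...</s_key> pairs
# become dict entries, nesting becomes nested dicts, and <sep/> separates the
# elements of a list, as the expected dict above illustrates.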
| 53 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
    model_type = "encodec"
    def __init__( self , target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate = 2_4_0_0_0 , audio_channels = 1 , normalize = False , chunk_length_s = None , overlap = None , hidden_size = 1_2_8 , num_filters = 3_2 , num_residual_layers = 1 , upsampling_ratios = [8, 5, 4, 2] , norm_type = "weight_norm" , kernel_size = 7 , last_kernel_size = 7 , residual_kernel_size = 3 , dilation_growth_rate = 2 , use_causal_conv = True , pad_mode = "reflect" , compress = 2 , num_lstm_layers = 2 , trim_right_ratio = 1.0 , codebook_size = 1_0_2_4 , codebook_dim = None , use_conv_shortcut = True , **kwargs , ):
        """simple docstring"""
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''')
        super().__init__(**kwargs)
    @property
    def chunk_length( self ):
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride( self ):
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate( self ):
        """simple docstring"""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers( self ):
        """simple docstring"""
        return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
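# Worked example with the 24 kHz defaults above: hop_length = 8 * 5 * 4 * 2
# = 320, so frame_rate = ceil(24000 / 320) = 75, and num_quantizers =
# 1000 * 24.0 // (75 * 10) = 32 quantizer codebooks.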
| 136 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''unispeech'''
def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="mean" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=80 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.5 ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Union[str, Any] = feat_extract_norm
UpperCAmelCase_ : Tuple = feat_extract_activation
UpperCAmelCase_ : List[Any] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = conv_bias
UpperCAmelCase_ : Optional[int] = num_conv_pos_embeddings
UpperCAmelCase_ : str = num_conv_pos_embedding_groups
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : str = attention_dropout
UpperCAmelCase_ : int = activation_dropout
UpperCAmelCase_ : str = feat_proj_dropout
UpperCAmelCase_ : Dict = final_dropout
UpperCAmelCase_ : Dict = layerdrop
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Tuple = num_ctc_classes
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Union[str, Any] = do_stable_layer_norm
UpperCAmelCase_ : List[Any] = use_weighted_layer_sum
UpperCAmelCase_ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Union[str, Any] = apply_spec_augment
UpperCAmelCase_ : Any = mask_time_prob
UpperCAmelCase_ : Any = mask_time_length
UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : Dict = mask_feature_length
UpperCAmelCase_ : Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : str = num_codevector_groups
UpperCAmelCase_ : Tuple = contrastive_logits_temperature
UpperCAmelCase_ : Union[str, Any] = feat_quantizer_dropout
UpperCAmelCase_ : Any = num_negatives
UpperCAmelCase_ : Dict = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Optional[Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : Tuple = ctc_zero_infinity
# pretraining loss
UpperCAmelCase_ : List[Any] = replace_prob
@property
def a__ ( self ) -> Optional[Any]:
        return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 235 |
def euclidean_distance_sqr( pointa , pointb ):
    '''simple docstring'''
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def column_based_sort( array , column=0 ):
    '''simple docstring'''
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('''inf''' ) ):
    '''simple docstring'''
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('''inf''' ) ):
    '''simple docstring'''
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    '''simple docstring'''
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    '''simple docstring'''
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
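# Complexity note: dis_between_closest_pair is the O(n^2) brute force used as
# the base case; the divide-and-conquer closest_pair_of_points_sqr runs in
# O(n log n) overall because the strip check compares each point against at
# most a constant number (here 6) of neighbours.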
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
| 235 | 1 |
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node :
    def __init__(self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self ) -> float:
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy
    def __lt__(self , other ) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst :
    def __init__(self , start , goal ) -> None:
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self ) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self , parent ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path(self , node ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('''------''')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
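# Note: despite ordering the open list by f_cost, this is greedy best-first
# search rather than A*: f_cost here is just the Manhattan heuristic dx + dy,
# so the accumulated g_cost never influences which node is expanded next.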
| 5 |
from math import ceil
def solution( n :int = 1_0_0_1 ) -> int:
    """simple docstring"""
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
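# Quick check: for a 5x5 number spiral the diagonals are 1, 3, 5, 7, 9, 13,
# 17, 21 and 25, which sum to 101; solution(5) reproduces that value.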
| 274 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase_ :
def __init__( self : List[Any] , A__ : Any , A__ : List[Any]=100 , A__ : List[Any]=13 , A__ : Tuple=30 , A__ : Any=2 , A__ : Any=3 , A__ : str=True , A__ : List[str]=True , A__ : Optional[int]=32 , A__ : Any=4 , A__ : List[Any]=4 , A__ : Any=37 , A__ : List[str]="gelu" , A__ : int=0.1 , A__ : Tuple=0.1 , A__ : Union[str, Any]=10 , A__ : Tuple=0.02 , A__ : List[str]=3 , A__ : Optional[int]=None , A__ : List[str]=[0, 1, 2, 3] , ) -> str:
_snake_case = parent
_snake_case = 100
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = scope
_snake_case = out_indices
_snake_case = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case = (image_size // patch_size) ** 2
_snake_case = num_patches + 1
def UpperCamelCase_ ( self : List[Any] ) -> int:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : str ) -> Optional[Any]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCamelCase_ ( self : Dict , A__ : Tuple , A__ : str , A__ : Union[str, Any] , A__ : int ) -> Tuple:
_snake_case = BeitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_snake_case = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] , A__ : Optional[int] , A__ : List[Any] , A__ : Optional[int] , A__ : Tuple ) -> Optional[Any]:
_snake_case = BeitForMaskedImageModeling(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_snake_case = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase_ ( self : List[str] , A__ : Optional[Any] , A__ : Optional[int] , A__ : Optional[int] , A__ : str ) -> int:
_snake_case = self.type_sequence_label_size
_snake_case = BeitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_snake_case = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = BeitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : List[Any] , A__ : Optional[int] , A__ : List[Any] , A__ : List[str] , A__ : Dict ) -> Optional[int]:
_snake_case = self.num_labels
_snake_case = BeitForSemanticSegmentation(lowercase_ )
model.to(lowercase_ )
model.eval()
_snake_case = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_snake_case = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase_ ( self : Tuple ) -> Optional[Any]:
_snake_case = self.prepare_config_and_inputs()
_snake_case, _snake_case, _snake_case, _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Any = False
UpperCamelCase_ : str = False
UpperCamelCase_ : int = False
def UpperCamelCase_ ( self : Optional[int] ) -> List[str]:
_snake_case = BeitModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def UpperCamelCase_ ( self : Optional[int] ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Any ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase_ ( self : int ) -> Optional[int]:
pass
def UpperCamelCase_ ( self : Optional[int] ) -> Tuple:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def UpperCamelCase_ ( self : Union[str, Any] ) -> Dict:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCamelCase_ ( self : Optional[Any] ) -> Tuple:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def UpperCamelCase_ ( self : Any ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def UpperCamelCase_ ( self : List[str] ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
def UpperCamelCase_ ( self : List[Any] ) -> str:
if not self.model_tester.is_training:
return
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowercase_ ), BeitForMaskedImageModeling]:
continue
_snake_case = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
_snake_case = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
_snake_case = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase_ ( self : Union[str, Any] ) -> Dict:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_snake_case = False
_snake_case = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowercase_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case = model_class(lowercase_ )
model.gradient_checkpointing_enable()
model.to(lowercase_ )
model.train()
_snake_case = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
_snake_case = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase_ ( self : List[Any] ) -> Dict:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = _config_zero_init(lowercase_ )
for model_class in self.all_model_classes:
_snake_case = model_class(config=lowercase_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def UpperCamelCase_ ( self : List[Any] ) -> List[str]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = BeitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def snake_case_():
"""simple docstring"""
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Optional[int] ) -> str:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Optional[int] ) -> Any:
_snake_case = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowercase_ )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase_ , return_tensors='''pt''' ).pixel_values.to(lowercase_ )
# prepare bool_masked_pos
_snake_case = torch.ones((1, 196) , dtype=torch.bool ).to(lowercase_ )
# forward pass
with torch.no_grad():
_snake_case = model(pixel_values=lowercase_ , bool_masked_pos=lowercase_ )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , lowercase_ )
_snake_case = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowercase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowercase_ , atol=1e-2 ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ) -> Optional[Any]:
_snake_case = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowercase_ )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase_ )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 1000) )
self.assertEqual(logits.shape , lowercase_ )
_snake_case = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowercase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 ) )
_snake_case = 281
self.assertEqual(logits.argmax(-1 ).item() , lowercase_ )
@slow
def UpperCamelCase_ ( self : Dict ) -> Optional[int]:
_snake_case = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
lowercase_ )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase_ )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 21841) )
self.assertEqual(logits.shape , lowercase_ )
_snake_case = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowercase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 ) )
_snake_case = 2396
self.assertEqual(logits.argmax(-1 ).item() , lowercase_ )
@slow
def UpperCamelCase_ ( self : List[Any] ) -> int:
_snake_case = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_snake_case = model.to(lowercase_ )
_snake_case = BeitImageProcessor(do_resize=lowercase_ , size=640 , do_center_crop=lowercase_ )
_snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_snake_case = Image.open(ds[0]['''file'''] )
_snake_case = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase_ )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowercase_ )
_snake_case = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
_snake_case = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowercase_ , )
else:
_snake_case = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowercase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1e-4 ) )
@slow
def UpperCamelCase_ ( self : Dict ) -> Union[str, Any]:
_snake_case = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_snake_case = model.to(lowercase_ )
_snake_case = BeitImageProcessor(do_resize=lowercase_ , size=640 , do_center_crop=lowercase_ )
_snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_snake_case = Image.open(ds[0]['''file'''] )
_snake_case = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase_ )
_snake_case = outputs.logits.detach().cpu()
_snake_case = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(500, 300)] )
_snake_case = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowercase_ )
_snake_case = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
_snake_case = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , lowercase_ )
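# These integration tests download real checkpoints, so they only run when the
# slow suite is enabled via transformers' RUN_SLOW convention, e.g. (the file
# path below is illustrative):
#   RUN_SLOW=1 pytest tests/models/beit/test_modeling_beit.py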
| 356 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ :
def __init__( self : Optional[Any] , A__ : str , A__ : Any=13 , A__ : str=[30, 30] , A__ : int=2 , A__ : Dict=3 , A__ : str=True , A__ : Union[str, Any]=True , A__ : Any=32 , A__ : int=5 , A__ : str=4 , A__ : List[Any]=37 , A__ : Union[str, Any]="gelu" , A__ : Dict=0.1 , A__ : Dict=0.1 , A__ : Tuple=10 , A__ : Dict=0.02 , A__ : Any=3 , A__ : Union[str, Any]=None , A__ : Optional[Any]=8 , A__ : Dict=10 , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = n_targets
_snake_case = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
_snake_case = (image_size[1] // patch_size) * (image_size[0] // patch_size)
_snake_case = num_patches + 1 + self.num_detection_tokens
def UpperCamelCase_ ( self : List[str] ) -> str:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_snake_case = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_snake_case = []
for i in range(self.batch_size ):
_snake_case = {}
_snake_case = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=A__ )
_snake_case = torch.rand(self.n_targets , 4 , device=A__ )
labels.append(A__ )
_snake_case = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Dict ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : Any , A__ : Any , A__ : str , A__ : Tuple ) -> Dict:
_snake_case = YolosModel(config=A__ )
model.to(A__ )
model.eval()
_snake_case = model(A__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , A__ : List[str] , A__ : Optional[Any] , A__ : str ) -> int:
_snake_case = YolosForObjectDetection(A__ )
model.to(A__ )
model.eval()
_snake_case = model(pixel_values=A__ )
_snake_case = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
_snake_case = model(pixel_values=A__ , labels=A__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCamelCase_ ( self : Optional[Any] ) -> Tuple:
_snake_case = self.prepare_config_and_inputs()
_snake_case, _snake_case, _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( __lowercase , __lowercase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCamelCase_ : int = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Tuple = False
def UpperCamelCase_ ( self : Dict , A__ : List[Any] , A__ : List[str] , A__ : Optional[int]=False ) -> Optional[int]:
_snake_case = super()._prepare_for_class(A__ , A__ , return_labels=A__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_snake_case = []
for i in range(self.model_tester.batch_size ):
_snake_case = {}
_snake_case = torch.ones(
size=(self.model_tester.n_targets,) , device=A__ , dtype=torch.long )
_snake_case = torch.ones(
self.model_tester.n_targets , 4 , device=A__ , dtype=torch.float )
labels.append(A__ )
_snake_case = labels
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ) -> List[str]:
_snake_case = YolosModelTester(self )
_snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase_ ( self : Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ) -> str:
# YOLOS does not use inputs_embeds
pass
def UpperCamelCase_ ( self : Union[str, Any] ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def UpperCamelCase_ ( self : List[Any] ) -> Optional[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(A__ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase_ ( self : List[str] ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase_ ( self : Union[str, Any] ) -> int:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
# in YOLOS, the seq_len is different
_snake_case = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_snake_case = len(A__ )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = 1
self.assertEqual(out_len + added_hidden_states , len(A__ ) )
_snake_case = outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase_ ( self : int ) -> Dict:
def check_hidden_states_output(A__ : Optional[int] , A__ : Union[str, Any] , A__ : int ):
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = outputs.hidden_states
_snake_case = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A__ ) , A__ )
# YOLOS has a different seq_length
_snake_case = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(A__ , A__ , A__ )
def UpperCamelCase_ ( self : Optional[Any] ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*A__ )
@slow
def UpperCamelCase_ ( self : List[str] ) -> Dict:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = YolosModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def snake_case_() -> str:
"""simple docstring"""
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Any ) -> str:
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Tuple ) -> str:
_snake_case = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(A__ )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=A__ , return_tensors='''pt''' ).to(A__ )
# forward pass
with torch.no_grad():
_snake_case = model(inputs.pixel_values )
# verify outputs
_snake_case = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , A__ )
_snake_case = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=A__ , )
_snake_case = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 ) )
# verify postprocessing
_snake_case = image_processor.post_process_object_detection(
A__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
_snake_case = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(A__ )
_snake_case = [75, 75, 17, 63, 17]
_snake_case = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(A__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , A__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , A__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , A__ ) )
| 278 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue :
    def __init__( self )-> None:
        self.data = []
        self.head = 0
        self.tail = 0
    def is_empty( self )-> bool:
        return self.head == self.tail
    def push( self , data )-> None:
        self.data.append(data )
        self.tail = self.tail + 1
    def pop( self )-> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count( self )-> int:
        return self.tail - self.head
    def print_queue( self )-> None:
        print(self.data )
        print("**************" )
        print(self.data[self.head : self.tail] )
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at `node` to the right: its left child becomes the new root."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Mirror image of right_rotation: the right child becomes the new subtree root."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
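# Minimal usage sketch (added for illustration, not part of the original module):
# build the left-heavy chain 3 <- 2 <- 1 and let right_rotation rebalance it.
# The node values are arbitrary examples.
def _rotation_demo() -> None:
    root = MyNode(3)
    child = MyNode(2)
    child.set_left(MyNode(1))
    root.set_left(child)
    child.set_height(2)
    root.set_height(3)
    new_root = right_rotation(root)  # 2 becomes the root, with children 1 and 3
    assert new_root.get_data() == 2
    assert new_root.get_left().get_data() == 1
    assert new_root.get_right().get_data() == 3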
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """An AVL tree: a self-balancing BST whose subtree heights differ by at most one at every node."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for _ in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
| 237 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if num < 0:
return False
snake_case_ = num
snake_case_ = 0
while num > 0:
snake_case_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
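
# Illustrative checks (added by the editor, not part of the original file): the loop
# peels the last digit off `num` and appends it to `rev_num`, so 12321 reverses to
# 12321 while 1232 reverses to 2321, and negatives are rejected up front.
assert is_palindrome_number(12321) is True
assert is_palindrome_number(1232) is False
assert is_palindrome_number(-121) is False
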
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 347 | 0 |
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
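
# Illustrative sketch (added): traverse_util.flatten_dict turns the nested T5X
# parameter tree into a dict keyed by tuples of path components; the converter
# below joins them with "/" before the lookup helpers above index into the result.
def _flatten_key_demo() -> None:
    tree = {"encoder": {"layers_0": {"attention": {"key": {"kernel": 0}}}}}
    flat = traverse_util.flatten_dict(tree)
    assert "/".join(next(iter(flat))) == "encoder/layers_0/attention/key/kernel"
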
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
snake_case_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 216 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Converts a PyTorch BertModel state dict into a TensorFlow 1.x checkpoint.

    Args:
        model: BertModel PyTorch model instance to be converted
        ckpt_dir: TensorFlow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
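
# Illustrative walk-through of the var_map rules above (added for clarity, not part
# of the original script): applied in order, they turn a PyTorch parameter name into
# the TF1 checkpoint name, e.g.
#   "encoder.layer.0.attention.output.LayerNorm.weight"
#   -> "bert/encoder/layer_0/attention/output/LayerNorm/gamma"
# and "embeddings.word_embeddings.weight" -> "bert/embeddings/word_embeddings".
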
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 216 | 1 |
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 107 |
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 107 | 1 |
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
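
# Shape of one record in the biencoder file, inferred from the field accesses above
# (the values are invented for illustration):
#   {
#       "question": "who sings does he love me with reba",
#       "positive_ctxs": [{"title": "Does He Love You", "text": "..."}],
#       ...
#   }
# The script writes each question to the evaluation set and the tab-joined positive
# context titles to the gold data file, one line per record.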
| 354 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_UpperCAmelCase : Tuple = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_UpperCAmelCase : int = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_UpperCAmelCase : Union[str, Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}"
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
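
# Illustrative note (added): in the CoNLL line format documented above, column 6
# (line.split()[5]) holds the parse bit, e.g.
#   "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -"
# has "(TOP(S(VP*" in that column, so check_gold_parse_annotation returns True;
# a bare "-" there means no gold parse is available and 'min_span' cannot be used.
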
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 158 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True if `num1` and `num2` have opposite signs, using the XOR sign-bit trick."""
    return num1 ^ num2 < 0
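
# Worked illustration (added): the XOR of two ints is negative exactly when their
# sign bits differ, so mixed-sign pairs return True and same-sign pairs return False.
assert different_signs(1, -1) is True
assert different_signs(1, 1) is False
assert different_signs(-2, -13) is False
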
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 121 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Check whether a string is a valid IPv4 address: four dot-separated decimal
    octets, each between 0 and 254 (this implementation deliberately rejects 255).
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
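
# Example checks (added): "192.168.0.23" passes, "192.255.15.8" fails because this
# implementation caps octets at 254, and "1.2.3" fails the four-octet length test.
assert is_ip_v4_address_valid("192.168.0.23") is True
assert is_ip_v4_address_valid("192.255.15.8") is False
assert is_ip_v4_address_valid("1.2.3") is False
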
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 121 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
 | 17 |
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums` (0 for an empty list)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
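
# Worked example (added): for [1, 2, 3, 4, 5] the best non-adjacent picks are
# 1 + 3 + 5 = 9; the rolling (max_including, max_excluding) pair reproduces this.
assert maximum_non_adjacent_sum([1, 2, 3, 4, 5]) == 9
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  # 5 + 7 + 6
assert maximum_non_adjacent_sum([]) == 0
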
if __name__ == "__main__":
    import doctest

    doctest.testmod()
 | 17 | 1 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve A.x = b iteratively: x_i_new = (b_i - sum_{j != i} a_ij * x_j) / a_ii."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that each diagonal entry exceeds the sum of the other entries in its row
    of the augmented table; raise ValueError otherwise."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
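
# Minimal usage sketch (added; the system is strictly diagonally dominant by
# construction, so the iteration converges):
def _jacobi_demo() -> list[float]:
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    # three sweeps already get close to the exact solution of A @ x = b
    return jacobi_iteration_method(coefficient, constant, [0, 0, 0], iterations=3)
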
# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 62 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer, based on SentencePiece with a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
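
# Layout sketch (added for illustration; token ids depend on the loaded vocab files):
# for a single sequence the special-token pattern built above is
#     <s> X </s>            -> [cls] + ids_0 + [sep]
# and for a pair it is
#     <s> A </s></s> B </s> -> [cls] + ids_0 + [sep, sep] + ids_1 + [sep]
# with get_special_tokens_mask marking exactly the added <s>/</s> positions with 1.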
| 62 | 1 |
"""BlenderbotSmall model configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BlenderbotSmall model."""

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
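
# Quick usage sketch (added; asserts rely on the defaults above and on the
# attribute_map remapping "hidden_size" to "d_model"):
def _config_demo() -> None:
    config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2)
    assert config.hidden_size == config.d_model == 512
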
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE = seqlen + 2
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""].dtype
__SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
__SCREAMING_SNAKE_CASE = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(__SCREAMING_SNAKE_CASE )
]
return common_inputs
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE = dict(tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
return common_inputs
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_causal_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
return common_inputs
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
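
# A minimal sketch (not part of the original file) of the dummy past-state shape
# arithmetic used in the generation methods above, assuming a BART-base-like setup
# with hidden_size=768 and 12 attention heads; all names below are illustrative.
import torch

batch, num_heads, seq_len, hidden_size = 2, 12, 8, 768
head_dim = hidden_size // num_heads                  # 64
past_shape = (batch, num_heads, seq_len, head_dim)   # one past key or value tensor
dummy_past_key = torch.zeros(past_shape)             # the loop above appends four such
                                                     # zeros per layer (self + cross k/v)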
| 331 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
if prompt is not None:
__SCREAMING_SNAKE_CASE = prompt
if generate_kwargs is not None:
__SCREAMING_SNAKE_CASE = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__SCREAMING_SNAKE_CASE = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__SCREAMING_SNAKE_CASE = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
__SCREAMING_SNAKE_CASE = self.model.config.model_type
if model_type == "git":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids
__SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids
__SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__SCREAMING_SNAKE_CASE = None
return model_inputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__SCREAMING_SNAKE_CASE = None
if generate_kwargs is None:
__SCREAMING_SNAKE_CASE = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name )
__SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for output_ids in model_outputs:
__SCREAMING_SNAKE_CASE = {
"""generated_text""": self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , )
}
records.append(__SCREAMING_SNAKE_CASE )
return records
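
# A minimal usage sketch for the pipeline defined above; the checkpoint name is
# illustrative, downloading it requires Hugging Face Hub access, and the relative
# imports above mean this module is meant to run inside the transformers package.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
    print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))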
| 331 | 1 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class A__ :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Any=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = start
_UpperCAmelCase : str = end
_UpperCAmelCase : Optional[int] = val
_UpperCAmelCase : Any = (start + end) // 2
_UpperCAmelCase : Union[str, Any] = left
_UpperCAmelCase : Any = right
def __repr__( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class A__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : Sequence , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = collection
_UpperCAmelCase : List[Any] = function
if self.collection:
_UpperCAmelCase : Union[str, Any] = self._build_tree(0 , len(lowerCAmelCase__ ) - 1 )
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
self._update_tree(self.root , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
return self._query_range(self.root , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
if start == end:
return SegmentTreeNode(lowerCAmelCase__ , lowerCAmelCase__ , self.collection[start] )
_UpperCAmelCase : Dict = (start + end) // 2
_UpperCAmelCase : Union[str, Any] = self._build_tree(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = self._build_tree(mid + 1 , lowerCAmelCase__ )
return SegmentTreeNode(lowerCAmelCase__ , lowerCAmelCase__ , self.fn(left.val , right.val ) , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if node.start == i and node.end == i:
_UpperCAmelCase : Optional[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , lowerCAmelCase__ , lowerCAmelCase__ )
else:
self._update_tree(node.right , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = self.fn(node.left.val , node.right.val )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ) -> int:
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowerCAmelCase__ , node.mid ) , self._query_range(node.right , node.mid + 1 , lowerCAmelCase__ ) , )
else:
# range in right child tree
return self._query_range(node.right , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if self.root is not None:
_UpperCAmelCase : Union[str, Any] = Queue()
queue.put(self.root )
while not queue.empty():
_UpperCAmelCase : Optional[int] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
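
# Worked example of the range-query split above, using the sum tree built in the
# demo below over [2, 1, 5, 3, 4]: query_range(1, 3) reaches the root [0, 4] with
# mid = 2; since i = 1 <= 2 < 3 = j, the range straddles both children, so the
# result is fn(query(left, 1, 2), query(right, 3, 3)).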
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
__a = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 145 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = '''instructblip_vision_model'''
def __init__( self : str , lowerCAmelCase__ : Dict=1_4_0_8 , lowerCAmelCase__ : int=6_1_4_4 , lowerCAmelCase__ : List[str]=3_9 , lowerCAmelCase__ : int=1_6 , lowerCAmelCase__ : Tuple=2_2_4 , lowerCAmelCase__ : Tuple=1_4 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Union[str, Any]=1e-6 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[int]=1e-10 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : List[Any] = image_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Tuple = qkv_bias
@classmethod
def _lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
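
# Usage sketch (checkpoint name illustrative): because `from_pretrained` above
# unwraps the nested "vision_config" whenever model_type == "instructblip", the
# vision tower's config can be loaded directly from a full InstructBlip checkpoint:
#
#   vision_cfg = InstructBlipVisionConfig.from_pretrained("Salesforce/instructblip-flan-t5-xl")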
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''instructblip_qformer'''
def __init__( self : List[str] , lowerCAmelCase__ : Union[str, Any]=3_0_5_2_2 , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Optional[Any]=1_2 , lowerCAmelCase__ : Union[str, Any]=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Optional[int]=1e-12 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Union[str, Any]="absolute" , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : int=1_4_0_8 , **lowerCAmelCase__ : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[str] = layer_norm_eps
_UpperCAmelCase : Tuple = position_embedding_type
_UpperCAmelCase : Tuple = cross_attention_frequency
_UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : Tuple = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = '''instructblip'''
UpperCamelCase_ : Dict = True
def __init__( self : Tuple , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=3_2 , **lowerCAmelCase__ : Dict ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if vision_config is None:
_UpperCAmelCase : List[str] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_UpperCAmelCase : Tuple = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_UpperCAmelCase : int = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_UpperCAmelCase : List[str] = InstructBlipVisionConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = InstructBlipQFormerConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = text_config["model_type"] if "model_type" in text_config else "opt"
_UpperCAmelCase : Optional[int] = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ )
_UpperCAmelCase : Dict = self.text_config.tie_word_embeddings
_UpperCAmelCase : List[Any] = self.text_config.is_encoder_decoder
_UpperCAmelCase : List[str] = num_query_tokens
_UpperCAmelCase : int = self.vision_config.hidden_size
_UpperCAmelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCAmelCase : int = 1.0
_UpperCAmelCase : Dict = 0.02
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : InstructBlipVisionConfig , lowerCAmelCase__ : InstructBlipQFormerConfig , lowerCAmelCase__ : PretrainedConfig , **lowerCAmelCase__ : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
_UpperCAmelCase : List[Any] = self.qformer_config.to_dict()
_UpperCAmelCase : List[Any] = self.text_config.to_dict()
_UpperCAmelCase : Dict = self.__class__.model_type
return output
| 145 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
__lowerCAmelCase = {
'''Salesforce/codegen-350M-mono''': 2_048,
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[Any] = ['input_ids', 'attention_mask']
lowerCAmelCase : str = CodeGenTokenizer
def __init__( self : Dict ,_UpperCAmelCase : Any=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Dict="<|endoftext|>" ,_UpperCAmelCase : Dict="<|endoftext|>" ,_UpperCAmelCase : List[Any]="<|endoftext|>" ,_UpperCAmelCase : Tuple=False ,**_UpperCAmelCase : Union[str, Any] ,):
super().__init__(
_UpperCAmelCase ,_UpperCAmelCase ,tokenizer_file=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,**_UpperCAmelCase ,)
if kwargs.pop('add_bos_token' ,_UpperCAmelCase ):
_a : Any = kwargs.pop('name_or_path' ,'' )
raise ValueError(
'Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,'
' after which the fast tokenizer will work correctly.' )
_a : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,_UpperCAmelCase ) != add_prefix_space:
_a : List[str] = getattr(_UpperCAmelCase ,pre_tok_state.pop('type' ) )
_a : int = add_prefix_space
_a : str = pre_tok_class(**_UpperCAmelCase )
_a : Optional[Any] = add_prefix_space
def __lowercase ( self : List[str] ,*_UpperCAmelCase : Any ,**_UpperCAmelCase : str ):
_a : Optional[Any] = kwargs.get('is_split_into_words' ,_UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : int ,*_UpperCAmelCase : int ,**_UpperCAmelCase : Union[str, Any] ):
_a : Union[str, Any] = kwargs.get('is_split_into_words' ,_UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ):
_a : Tuple = self._tokenizer.model.save(_UpperCAmelCase ,name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = None ,_UpperCAmelCase : Optional[List[str]] = None ,**_UpperCAmelCase : List[str] ,):
_a : List[Any] = super().decode(
token_ids=_UpperCAmelCase ,skip_special_tokens=_UpperCAmelCase ,clean_up_tokenization_spaces=_UpperCAmelCase ,**_UpperCAmelCase ,)
if truncate_before_pattern is not None and len(_UpperCAmelCase ) > 0:
_a : Optional[int] = self.truncate(_UpperCAmelCase ,_UpperCAmelCase )
return decoded_text
def __lowercase ( self : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[Any] ):
def find_re(_UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : int ):
_a : int = pattern.search(_UpperCAmelCase ,_UpperCAmelCase )
return m.start() if m else -1
_a : Union[str, Any] = [re.compile(_UpperCAmelCase ,re.MULTILINE ) for pattern in truncate_before_pattern]
_a : Tuple = list(re.finditer('^print' ,_UpperCAmelCase ,re.MULTILINE ) )
if len(_UpperCAmelCase ) > 1:
_a : str = completion[: prints[1].start()]
_a : Tuple = list(re.finditer('^def' ,_UpperCAmelCase ,re.MULTILINE ) )
if len(_UpperCAmelCase ) > 1:
_a : Union[str, Any] = completion[: defs[1].start()]
_a : Union[str, Any] = 0
_a : Optional[Any] = [
pos for pos in [find_re(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) for terminal in terminals] if pos != -1
]
if len(_UpperCAmelCase ) > 0:
return completion[: min(_UpperCAmelCase )]
else:
return completion
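
# Usage sketch for the `truncate_before_pattern` hook implemented above (upstream
# this class is `CodeGenTokenizerFast`; class names here are obfuscated, so the
# call below is illustrative, with a pattern list mirroring the upstream docstring):
#
#   tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tok.decode(generated_ids, truncate_before_pattern=["^#", "^'''", "\n\n\n"])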
| 363 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Optional[int] = 'distilbert'
lowerCAmelCase : Optional[int] = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : str ,_UpperCAmelCase : Optional[Any]=30522 ,_UpperCAmelCase : int=512 ,_UpperCAmelCase : int=False ,_UpperCAmelCase : str=6 ,_UpperCAmelCase : int=12 ,_UpperCAmelCase : Union[str, Any]=768 ,_UpperCAmelCase : Optional[Any]=4 * 768 ,_UpperCAmelCase : int=0.1 ,_UpperCAmelCase : Union[str, Any]=0.1 ,_UpperCAmelCase : Tuple="gelu" ,_UpperCAmelCase : Any=0.02 ,_UpperCAmelCase : List[Any]=0.1 ,_UpperCAmelCase : Dict=0.2 ,_UpperCAmelCase : Union[str, Any]=0 ,**_UpperCAmelCase : List[Any] ,):
_a : Optional[Any] = vocab_size
_a : Dict = max_position_embeddings
_a : Optional[int] = sinusoidal_pos_embds
_a : List[Any] = n_layers
_a : Any = n_heads
_a : int = dim
_a : Optional[Any] = hidden_dim
_a : str = dropout
_a : Optional[Any] = attention_dropout
_a : Any = activation
_a : Tuple = initializer_range
_a : Union[str, Any] = qa_dropout
_a : List[Any] = seq_classif_dropout
super().__init__(**_UpperCAmelCase ,pad_token_id=_UpperCAmelCase )
class __magic_name__ ( _UpperCamelCase ):
@property
def __lowercase ( self : Union[str, Any] ):
if self.task == "multiple-choice":
_a : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_a : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
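
# For reference (derived directly from the property above, no new behavior): for
# any task other than "multiple-choice" it yields
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# i.e. batch and sequence are the dynamic axes exported to ONNX.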
| 107 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] ="""altclip_text_model"""
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any]=2_5_0_0_0_2 , UpperCAmelCase__ : Optional[Any]=1_0_2_4 , UpperCAmelCase__ : List[Any]=2_4 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : Optional[int]=4_0_9_6 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[int]=5_1_4 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Dict=1e-05 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=7_6_8 , **UpperCAmelCase__ : int , ) ->Any:
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = position_embedding_type
SCREAMING_SNAKE_CASE : Dict = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = project_dim
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] ="""altclip_vision_model"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int=7_6_8 , UpperCAmelCase__ : Union[str, Any]=3_0_7_2 , UpperCAmelCase__ : Any=5_1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : Any=1_2 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : int=2_2_4 , UpperCAmelCase__ : Optional[Any]=3_2 , UpperCAmelCase__ : List[str]="quick_gelu" , UpperCAmelCase__ : List[Any]=1e-5 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : List[str]=1.0 , **UpperCAmelCase__ : Tuple , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Any = projection_dim
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Dict = initializer_factor
SCREAMING_SNAKE_CASE : Tuple = attention_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = hidden_act
@classmethod
def _lowercase ( cls : str , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Optional[int] ) ->"PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
SCREAMING_SNAKE_CASE : Any = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] ="""altclip"""
UpperCAmelCase__ : Dict =True
def __init__( self : Tuple , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : str=7_6_8 , UpperCAmelCase__ : int=2.65_92 , **UpperCAmelCase__ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = kwargs.pop("""text_config_dict""" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("""vision_config_dict""" , UpperCAmelCase__ )
super().__init__(**UpperCAmelCase__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
# This is the complete result when using `text_config_dict`.
SCREAMING_SNAKE_CASE : str = AltCLIPTextConfig(**UpperCAmelCase__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but differ.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
SCREAMING_SNAKE_CASE : List[Any] = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (
f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
f"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(UpperCAmelCase__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
SCREAMING_SNAKE_CASE : Optional[Any] = {}
# This is the complete result when using `vision_config_dict`.
SCREAMING_SNAKE_CASE : Tuple = AltCLIPVisionConfig(**UpperCAmelCase__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
SCREAMING_SNAKE_CASE : List[Any] = {
str(UpperCAmelCase__ ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but differ.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
SCREAMING_SNAKE_CASE : Optional[int] = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
f"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(UpperCAmelCase__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
SCREAMING_SNAKE_CASE : int = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = AltCLIPTextConfig(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = AltCLIPVisionConfig(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = projection_dim
SCREAMING_SNAKE_CASE : List[str] = logit_scale_init_value
SCREAMING_SNAKE_CASE : int = 1.0
@classmethod
def _lowercase ( cls : Dict , UpperCAmelCase__ : AltCLIPTextConfig , UpperCAmelCase__ : AltCLIPVisionConfig , **UpperCAmelCase__ : str ) ->Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE : str = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type
return output
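
# Usage sketch (upstream these classes are `AltCLIPTextConfig`, `AltCLIPVisionConfig`
# and `AltCLIPConfig`; class names here are obfuscated, so this is illustrative only):
#
#   cfg = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
#   cfg.projection_dim   # 768 by default, per the __init__ above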
| 245 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowercase ( _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE : List[str] = torch.exp(_A )
SCREAMING_SNAKE_CASE : List[str] = torch.sum(_A , dim=1 ) # sum of exp(x_i)
SCREAMING_SNAKE_CASE : Dict = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_A ) - B / A
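
# Sanity check for the helper above (upstream it is named `entropy`): it computes
# the entropy of softmax(x) as log(sum(exp(x))) - E[x], so uniform logits give
# log(n), e.g. entropy(torch.zeros(1, 4)) -> tensor([1.3863]) (= log 4).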
class a__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase__ : int ) ->List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = config.output_attentions
SCREAMING_SNAKE_CASE : Any = config.output_hidden_states
SCREAMING_SNAKE_CASE : str = nn.ModuleList([BertLayer(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
SCREAMING_SNAKE_CASE : str = nn.ModuleList([BertHighway(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
SCREAMING_SNAKE_CASE : Union[str, Any] = [-1 for _ in range(config.num_hidden_layers )]
def _lowercase ( self : str , UpperCAmelCase__ : str ) ->Dict:
"""simple docstring"""
if (type(UpperCAmelCase__ ) is float) or (type(UpperCAmelCase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
SCREAMING_SNAKE_CASE : Tuple = x
else:
SCREAMING_SNAKE_CASE : Optional[Any] = x
def _lowercase ( self : str , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Tuple=None , ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = ()
SCREAMING_SNAKE_CASE : Dict = ()
SCREAMING_SNAKE_CASE : str = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : Union[str, Any] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE : int = layer_module(
UpperCAmelCase__ , UpperCAmelCase__ , head_mask[i] , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = layer_outputs[0]
if self.output_attentions:
SCREAMING_SNAKE_CASE : Dict = all_attentions + (layer_outputs[1],)
SCREAMING_SNAKE_CASE : Optional[int] = (hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : int = current_outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE : int = current_outputs + (all_attentions,)
SCREAMING_SNAKE_CASE : Dict = self.highway[i](UpperCAmelCase__ )
# logits, pooled_output
if not self.training:
SCREAMING_SNAKE_CASE : str = highway_exit[0]
SCREAMING_SNAKE_CASE : Dict = entropy(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
SCREAMING_SNAKE_CASE : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
SCREAMING_SNAKE_CASE : Dict = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCAmelCase__ , i + 1 )
else:
SCREAMING_SNAKE_CASE : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : List[str] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE : Optional[int] = (hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE : Optional[int] = outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE : Optional[int] = outputs + (all_attentions,)
SCREAMING_SNAKE_CASE : Any = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , UpperCAmelCase , )
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple ) ->List[str]:
"""simple docstring"""
super().__init__(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = config
SCREAMING_SNAKE_CASE : Union[str, Any] = BertEmbeddings(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = DeeBertEncoder(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = BertPooler(UpperCAmelCase__ )
self.init_weights()
def _lowercase ( self : str ) ->Optional[int]:
"""simple docstring"""
self.encoder.init_highway_pooler(self.pooler )
def _lowercase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
return self.embeddings.word_embeddings
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = value
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Dict ) ->str:
"""simple docstring"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCAmelCase__ )
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def _lowercase ( self : Dict , UpperCAmelCase__ : str=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=None , ) ->int:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
SCREAMING_SNAKE_CASE : str = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
SCREAMING_SNAKE_CASE : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if token_type_ids is None:
SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
SCREAMING_SNAKE_CASE : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
SCREAMING_SNAKE_CASE : Optional[int] = encoder_attention_mask[:, None, None, :]
SCREAMING_SNAKE_CASE : Optional[int] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
SCREAMING_SNAKE_CASE : str = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE : str = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE : str = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : int = encoder_outputs[0]
SCREAMING_SNAKE_CASE : int = self.pooler(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = message
SCREAMING_SNAKE_CASE : str = exit_layer # start from 1!
class a__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : Any ) ->List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = BertPooler(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(config.hidden_size , config.num_labels )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = encoder_outputs[0]
SCREAMING_SNAKE_CASE : int = self.pooler(UpperCAmelCase__ )
# "return" pooler_output
# BertModel
SCREAMING_SNAKE_CASE : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
SCREAMING_SNAKE_CASE : List[Any] = bmodel_output[1]
SCREAMING_SNAKE_CASE : Any = self.dropout(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = self.classifier(UpperCAmelCase__ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , UpperCAmelCase , )
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) ->int:
"""simple docstring"""
super().__init__(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = config.num_labels
SCREAMING_SNAKE_CASE : int = config.num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = DeeBertModel(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=-1 , UpperCAmelCase__ : List[str]=False , ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_layers
try:
SCREAMING_SNAKE_CASE : str = self.bert(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
SCREAMING_SNAKE_CASE : Optional[Any] = outputs[1]
SCREAMING_SNAKE_CASE : Any = self.dropout(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.classifier(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
SCREAMING_SNAKE_CASE : Optional[int] = e.message
SCREAMING_SNAKE_CASE : Optional[Any] = e.exit_layer
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
if not self.training:
SCREAMING_SNAKE_CASE : Optional[Any] = entropy(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Dict = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE : List[str] = MSELoss()
SCREAMING_SNAKE_CASE : str = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : Tuple = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
SCREAMING_SNAKE_CASE : List[Any] = []
for highway_exit in outputs[-1]:
SCREAMING_SNAKE_CASE : Optional[int] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCAmelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE : Any = MSELoss()
SCREAMING_SNAKE_CASE : List[str] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : Any = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCAmelCase__ )
if train_highway:
SCREAMING_SNAKE_CASE : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
SCREAMING_SNAKE_CASE : int = (loss,) + outputs
if not self.training:
SCREAMING_SNAKE_CASE : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
SCREAMING_SNAKE_CASE : List[str] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
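
# Inference-time behavior of the model above, for reference: each highway off-ramp
# computes the entropy of its logits and raises HighwayException as soon as that
# entropy falls below the per-layer threshold, so later layers are skipped; the
# classifier's forward catches the exception and returns the off-ramp's logits.
# Upstream the threshold is set via `model.bert.encoder.set_early_exit_entropy(x)`
# (the setter is the obfuscated method on the encoder class above).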
| 245 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
def __init__( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : List[str]=30 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Optional[int]=37 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Tuple=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : str=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : int = patch_size
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = type_sequence_label_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : List[str] = num_patches + 1
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def __A ( self : Optional[Any] ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def __A ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFViTModel(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with a different size than the one specified in config.
SCREAMING_SNAKE_CASE : Optional[int] = self.image_size // 2
SCREAMING_SNAKE_CASE : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ , training=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = TFViTForImageClassification(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
SCREAMING_SNAKE_CASE : List[Any] = self.image_size // 2
SCREAMING_SNAKE_CASE : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = TFViTForImageClassification(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase_ = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFViTModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __A ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __A ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
def __A ( self : Tuple ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def __A ( self : Optional[int] ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ):
SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase):
@cached_property
def __A ( self : str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
SCREAMING_SNAKE_CASE : int = self.default_image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
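# A minimal standalone inference sketch mirroring the integration test above
# (assumes Hub access and the vision extras; the checkpoint name comes from the test):
#
#     from PIL import Image
#     from transformers import TFViTForImageClassification, ViTImageProcessor
#
#     processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#     inputs = processor(images=Image.open("cats.png"), return_tensors="tf")
#     logits = model(**inputs).logits  # (1, 1000) ImageNet logits, as asserted above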
| 258 | import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__UpperCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowercase__ ( datasets.BuilderConfig):
UpperCamelCase_ = None
def A ( _lowercase , _lowercase , ):
import pyspark
def generate_fn():
SCREAMING_SNAKE_CASE : str = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
SCREAMING_SNAKE_CASE : Any = df_with_partition_id.select('''*''' ).where(f"""part_id = {partition_id}""" ).drop('''part_id''' )
SCREAMING_SNAKE_CASE : Tuple = partition_df.collect()
SCREAMING_SNAKE_CASE : str = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class lowercase__ ( _BaseExamplesIterable):
def __init__( self : Optional[Any] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : Union[str, Any]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = df
SCREAMING_SNAKE_CASE : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
SCREAMING_SNAKE_CASE : Dict = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def __A ( self : Tuple , UpperCamelCase__ : np.random.Generator ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.split_shard_indices_by_worker(UpperCamelCase__ , UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
@property
def __A ( self : Tuple ):
'''simple docstring'''
return len(self.partition_order )
class lowercase__ ( datasets.DatasetBuilder):
UpperCamelCase_ = SparkConfig
def __init__( self : Union[str, Any] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : str = None , UpperCamelCase__ : str = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
import pyspark
SCREAMING_SNAKE_CASE : str = pyspark.sql.SparkSession.builder.getOrCreate()
SCREAMING_SNAKE_CASE : List[Any] = df
SCREAMING_SNAKE_CASE : Tuple = working_dir
super().__init__(
cache_dir=UpperCamelCase__ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase__ , )
def __A ( self : Tuple ):
'''simple docstring'''
def create_cache_and_write_probe(UpperCamelCase__ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCamelCase__ )
            SCREAMING_SNAKE_CASE : Any = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCamelCase__ , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
SCREAMING_SNAKE_CASE : Dict = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def __A ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __A ( self : str , UpperCamelCase__ : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __A ( self : int , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(UpperCamelCase__ : Tuple ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
SCREAMING_SNAKE_CASE : int = self.df.count()
SCREAMING_SNAKE_CASE : Tuple = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.df.limit(UpperCamelCase__ )
.repartition(1 )
.mapInArrow(UpperCamelCase__ , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
SCREAMING_SNAKE_CASE : Optional[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
SCREAMING_SNAKE_CASE : List[str] = min(UpperCamelCase__ , int(approx_total_size / max_shard_size ) )
SCREAMING_SNAKE_CASE : Optional[int] = self.df.repartition(UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , ):
'''simple docstring'''
import pyspark
SCREAMING_SNAKE_CASE : int = ParquetWriter if file_format == '''parquet''' else ArrowWriter
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self._working_dir , os.path.basename(UpperCamelCase__ ) ) if self._working_dir else fpath
SCREAMING_SNAKE_CASE : Optional[int] = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
SCREAMING_SNAKE_CASE : str = self.config.features
SCREAMING_SNAKE_CASE : Optional[int] = self._writer_batch_size
SCREAMING_SNAKE_CASE : Optional[int] = self._fs.storage_options
def write_arrow(UpperCamelCase__ : Optional[Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
SCREAMING_SNAKE_CASE : int = pyspark.TaskContext().taskAttemptId()
SCREAMING_SNAKE_CASE : str = next(UpperCamelCase__ , UpperCamelCase__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = writer_class(
features=UpperCamelCase__ , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Tuple = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCamelCase__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
SCREAMING_SNAKE_CASE : Optional[int] = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : List[str] = pa.Table.from_batches([batch] )
writer.write_table(UpperCamelCase__ )
if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : int = os.path.join(os.path.dirname(UpperCamelCase__ ) , os.path.basename(UpperCamelCase__ ) )
shutil.move(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.df.mapInArrow(UpperCamelCase__ , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __A ( self : Dict , UpperCamelCase__ : "datasets.SplitGenerator" , UpperCamelCase__ : str = "arrow" , UpperCamelCase__ : Optional[Union[str, int]] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
self._validate_cache_dir()
SCREAMING_SNAKE_CASE : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = not is_remote_filesystem(self._fs )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join if is_local else posixpath.join
SCREAMING_SNAKE_CASE : List[Any] = '''-TTTTT-SSSSS-of-NNNNN'''
SCREAMING_SNAKE_CASE : List[str] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
SCREAMING_SNAKE_CASE : Dict = path_join(self._output_dir , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Dict = []
for task_id, content in self._prepare_split_single(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = total_num_examples
SCREAMING_SNAKE_CASE : Dict = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
SCREAMING_SNAKE_CASE : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
SCREAMING_SNAKE_CASE : str = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , ):
rename(
UpperCamelCase__ , fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , f"""{global_shard_id:05d}""" ).replace('''NNNNN''' , f"""{total_shards:05d}""" ) , )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Optional[Any] = 0
            for i in range(len(task_id_and_num_shards) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCamelCase__ , len(UpperCamelCase__ ) ).map(lambda UpperCamelCase__ : _rename_shard(*UpperCamelCase__ ) ).collect()
else:
# don't use any pattern
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace(UpperCamelCase__ , '''''' ) , )
def __A ( self : int , UpperCamelCase__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
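# Usage sketch for the builder above (assumes an active local SparkSession;
# `datasets.Dataset.from_spark`, available in datasets >= 2.13, is the public
# entry point that drives this builder):
#
#     from pyspark.sql import SparkSession
#     import datasets
#
#     spark = SparkSession.builder.master("local[2]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], "text: string")
#     ds = datasets.Dataset.from_spark(df)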
| 258 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array:
"""simple docstring"""
_a : Tuple = F"""{sampling_rate}"""
_a : str = '''1'''
_a : int = '''f32le'''
_a : Optional[int] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
_a : List[str] = ffmpeg_process.communicate(__a )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
_a : int = output_stream[0]
    _a : List[str] = np.frombuffer(__a , np.float32 )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
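# Usage sketch for the decoder above (`ffmpeg_read` in the original transformers
# source; requires the ffmpeg binary on PATH, and 16 kHz is a typical ASR rate):
#
#     with open("sample.flac", "rb") as f:
#         waveform = ffmpeg_read(f.read(), 16_000)  # mono float32 numpy array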
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> List[str]:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Dict = '''1'''
if format_for_conversion == "s16le":
_a : str = 2
elif format_for_conversion == "f32le":
_a : str = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : List[str] = platform.system()
if system == "Linux":
_a : str = '''alsa'''
_a : Optional[Any] = '''default'''
elif system == "Darwin":
_a : Optional[Any] = '''avfoundation'''
_a : List[Any] = ''':0'''
elif system == "Windows":
_a : List[Any] = '''dshow'''
_a : Union[str, Any] = '''default'''
_a : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : Union[str, Any] = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Dict = stream_chunk_s
else:
_a : Optional[Any] = chunk_length_s
_a : List[str] = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
        _a : List[Any] = np.int16
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
        _a : List[Any] = np.float32
_a : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : Union[str, Any] = chunk_length_s / 6
_a : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Any = [stride_length_s, stride_length_s]
_a : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : List[Any] = datetime.datetime.now()
_a : Optional[int] = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Any = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Union[str, Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Any:
"""simple docstring"""
_a : Tuple = b''''''
    stride_left, stride_right = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
for raw in iterator:
acc += raw
if stream and len(__a ) < chunk_len:
_a : List[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__a ) >= chunk_len:
# We are flushing the accumulator
_a : int = (_stride_left, stride_right)
_a : Tuple = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
_a : Any = False
yield item
_a : Dict = stride_left
_a : Optional[int] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__a ) > stride_left:
_a : int = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
_a : List[Any] = False
yield item
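# Stride arithmetic, worked through: with chunk_len=10 and stride=(2, 2), each
# iteration consumes acc[:10] and keeps the last 4 bytes (acc[10 - 2 - 2:]), so
# consecutive chunks overlap by stride_left + stride_right bytes; the first chunk
# reports a left stride of 0 because nothing precedes it.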
def __UpperCAmelCase ( __a : Tuple ,__a : int ) -> Optional[Any]:
"""simple docstring"""
_a : Dict = 2**24 # 16Mo
try:
with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process:
while True:
_a : str = ffmpeg_process.stdout.read(__a )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 235 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=7 , _a=False , _a=True , _a=False , _a=False , _a=1_9 , _a=3_2 , _a=5 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Union[str, Any]:
_a : Optional[Any] = parent
_a : Union[str, Any] = batch_size
_a : List[Any] = seq_length
_a : Dict = is_training
_a : int = use_input_mask
_a : str = use_token_type_ids
_a : Any = use_labels
_a : List[Any] = vocab_size
_a : Any = hidden_size
_a : int = num_hidden_layers
_a : str = num_attention_heads
_a : Dict = intermediate_size
_a : List[str] = hidden_act
_a : Optional[Any] = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : Tuple = type_vocab_size
_a : str = type_sequence_label_size
_a : Any = initializer_range
_a : Union[str, Any] = num_labels
_a : Dict = num_choices
_a : Union[str, Any] = scope
def __lowercase ( self ) -> List[Any]:
_a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict = None
if self.use_input_mask:
_a : int = random_attention_mask([self.batch_size, self.seq_length] )
_a : List[Any] = None
_a : Tuple = None
_a : Any = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_a : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self ) -> str:
_a : Optional[int] = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_a , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> str:
_a : Union[str, Any] = EsmForProteinFolding(config=_a ).float()
model.to(_a )
model.eval()
_a : str = model(_a , attention_mask=_a )
_a : Union[str, Any] = model(_a )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
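        # Shape notes (AlphaFold-style conventions): `positions` is
        # (num_structure_blocks=8, batch, seq_len, 14, 3), i.e. xyz coordinates in the
        # 14-atom residue representation from each structure-module block, and
        # `angles` holds 7 torsion angles per residue as (sin, cos) pairs.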
def __lowercase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Any = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = ()
UpperCAmelCase__ : int = {} if is_torch_available() else {}
UpperCAmelCase__ : Optional[int] = False
def __lowercase ( self ) -> List[Any]:
_a : Optional[int] = EsmFoldModelTester(self )
_a : Dict = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def __lowercase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> str:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@unittest.skip('''Does not support attention outputs''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __lowercase ( self ) -> Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@require_torch
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@slow
def __lowercase ( self ) -> Optional[int]:
_a : Dict = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
_a : Tuple = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
_a : Optional[Any] = model(_a )['''positions''']
        _a : Union[str, Any] = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float32 )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _a , atol=1e-4 ) )
| 235 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase :List[Any] = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :int = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :List[str] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :List[Any] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
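# Behaviour of the lazy-import pattern above: `_import_structure` maps submodule
# names to exported symbols; under TYPE_CHECKING the real imports run for static
# analysis, while at runtime `_LazyModule` replaces this module in `sys.modules`
# and only imports a submodule the first time one of its attributes is accessed,
# so importing a config class does not drag in the torch- or TF-backed modules.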
| 363 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =None
a__ =None
a__ =None # sigma(t_i)
@classmethod
def __lowerCAmelCase ( cls ) -> Tuple:
return cls()
@dataclass
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =42
a__ =42
a__ =42
class _UpperCAmelCase ( a ,a ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return True
@register_to_config
def __init__( self , A = 0.02 , A = 1_0_0 , A = 1.007 , A = 8_0 , A = 0.05 , A = 5_0 , ) -> Any:
pass
def __lowerCAmelCase ( self ) -> int:
return KarrasVeSchedulerState.create()
def __lowerCAmelCase ( self , A , A , A = () ) -> KarrasVeSchedulerState:
_UpperCAmelCase : Tuple = jnp.arange(0 , A )[::-1].copy()
_UpperCAmelCase : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=A , schedule=jnp.array(A , dtype=jnp.floataa ) , timesteps=A , )
def __lowerCAmelCase ( self , A , A , A , A , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_UpperCAmelCase : Optional[Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
_UpperCAmelCase : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
_UpperCAmelCase : Any = random.split(A , num=1 )
_UpperCAmelCase : Optional[Any] = self.config.s_noise * random.normal(key=A , shape=sample.shape )
_UpperCAmelCase : str = sigma + gamma * sigma
_UpperCAmelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowerCAmelCase ( self , A , A , A , A , A , A = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCAmelCase : Dict = sample_hat + sigma_hat * model_output
_UpperCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
_UpperCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=A , derivative=A , state=A )
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCAmelCase : Union[str, Any] = sample_prev + sigma_prev * model_output
_UpperCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
_UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=A , derivative=A , state=A )
def __lowerCAmelCase ( self , A , A , A , A ) -> Any:
raise NotImplementedError()
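# Schedule and churn terms implemented above, written out: the squared sigmas
# interpolate geometrically,
#     sigma_i**2 = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)),
# and the noise-injection step (add_noise_to_input in the diffusers source) uses
#     gamma = min(s_churn / N, sqrt(2) - 1)   whenever s_min <= sigma <= s_max,
#     sigma_hat = sigma + gamma * sigma,
# following Karras et al. (2022), "Elucidating the Design Space of
# Diffusion-Based Generative Models".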
| 68 | 0 |
"""simple docstring"""
def solution(lowercase__ : int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    fa, fb = 1, 1
    index = 2
    while True:
        digits = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f ):
            digits += 1
        if digits == lowercase__:
            break
    return index
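# Example: solution(3) == 12, since F(12) = 144 is the first Fibonacci number
# with three digits.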
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 84 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compared to taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads
# to ~3MB in total for all files. The latter approach is implemented by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_A = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
_A = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_A = dict(zip(vocab, range(len(vocab))))
_A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
_A = Path(tmpdirname)
_A = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
_A = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
_A = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
_A = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
_A = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
_A = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
_A = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
_A = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
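# Size note (rough arithmetic, assuming the config above): with d_model=4, one
# encoder and one decoder layer, the two 1000-entry embedding tables dominate at
# about 1000 * 4 parameters each; `half()` then stores every parameter in 2 bytes
# instead of 4, which is what keeps all generated files around the quoted ~60KB.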
| 278 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCamelCase_ : Optional[int] = """\
Text data.
Second line of data."""
lowerCamelCase_ : int = """file"""
@pytest.fixture(scope='session' )
def lowerCAmelCase( __lowerCamelCase ):
__a = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
__a = bytes(__lowerCamelCase , 'utf-8' )
with zstd.open(__lowerCamelCase , 'wb' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture
def lowerCAmelCase( __lowerCamelCase ):
with open(os.path.join(tmpfs.local_root_dir , __lowerCamelCase ) , 'w' ) as f:
f.write(__lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / 'cache'
__a = DownloadConfig(cache_dir=__lowerCamelCase , extract_compressed_file=__lowerCamelCase )
__a = cached_path(__lowerCamelCase , download_config=__lowerCamelCase )
with open(__lowerCamelCase ) as f:
__a = f.read()
with open(__lowerCamelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = 'custom_cache'
__a = 'custom_extracted_dir'
__a = tmp_path / 'custom_extracted_path'
if default_extracted:
__a = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , __lowerCamelCase )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__lowerCamelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=__lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__lowerCamelCase )
)
__a = cached_path(__lowerCamelCase , download_config=__lowerCamelCase )
assert Path(__lowerCamelCase ).parent.parts[-2:] == expected
def lowerCAmelCase( __lowerCamelCase ):
# absolute path
__a = str(Path(__lowerCamelCase ).resolve() )
assert cached_path(__lowerCamelCase ) == text_file
# relative path
__a = str(Path(__lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__lowerCamelCase ) == text_file
def lowerCAmelCase( __lowerCamelCase ):
# absolute path
__a = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(__lowerCamelCase ):
cached_path(__lowerCamelCase )
# relative path
__a = './__missing_file__.txt'
with pytest.raises(__lowerCamelCase ):
cached_path(__lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase ):
__a = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(__lowerCamelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCamelCase )
def lowerCAmelCase( ):
with pytest.raises(__lowerCamelCase ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase ):
__a = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__lowerCamelCase ):
http_get('https://huggingface.co' , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase ):
__a = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__lowerCamelCase ):
ftp_get('ftp://huggingface.co' , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase ):
__a = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(__lowerCamelCase ):
fsspec_get('s3://huggingface.co' , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
fsspec_head('s3://huggingface.co' )
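# Offline behaviour exercised by the three tests above: with HF_DATASETS_OFFLINE
# patched to True, every http/ftp/fsspec fetch raises OfflineModeIsEnabled before
# touching the network, so cached_path can only serve files already on disk.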
| 366 | import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a__ :
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__a = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
__a = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
torch.manual_seed(0 )
__a = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
__a = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
__a = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.get_dummy_components()
__a = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = inputs['prompt']
__a = inputs['generator']
__a = inputs['num_inference_steps']
__a = inputs['output_type']
if "image" in inputs:
__a = inputs['image']
else:
__a = None
if "mask_image" in inputs:
__a = inputs['mask_image']
else:
__a = None
if "original_image" in inputs:
__a = inputs['original_image']
else:
__a = None
__a , __a = pipe.encode_prompt(UpperCAmelCase )
# inputs with prompt converted to embeddings
__a = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
__a = image
if mask_image is not None:
__a = mask_image
if original_image is not None:
__a = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
__a = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase , UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = inputs['generator']
__a = inputs['num_inference_steps']
__a = inputs['output_type']
# inputs with prompt converted to embeddings
__a = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
__a = image
if mask_image is not None:
__a = mask_image
if original_image is not None:
__a = original_image
__a = pipe_loaded(**UpperCAmelCase )[0]
__a = np.abs(to_np(UpperCAmelCase ) - to_np(UpperCAmelCase ) ).max()
self.assertLess(UpperCAmelCase , 1e-4 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = self.get_dummy_components()
__a = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
__a = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = pipe_loaded(**UpperCAmelCase )[0]
__a = np.abs(to_np(UpperCAmelCase ) - to_np(UpperCAmelCase ) ).max()
self.assertLess(UpperCAmelCase , 1e-4 )
| 197 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ =logging.get_logger(__name__)
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ):
__a : List[str] = os.path.abspath(lowerCAmelCase__ )
logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__a : Tuple = tf.train.list_variables(lowerCAmelCase__ )
__a : Optional[Any] = []
__a : Union[str, Any] = []
__a : str = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__a : Any = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__a : Any = name[1:]
# figure out how many levels deep the name is
__a : List[Any] = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(lowerCAmelCase__ )
# read data
__a : Tuple = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
names.append('''/'''.join(lowerCAmelCase__ ) )
arrays.append(lowerCAmelCase__ )
logger.info(f"Read a total of {len(lowerCAmelCase__ ):,} layers" )
# Sanity check
if len(set(lowerCAmelCase__ ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(lowerCAmelCase__ ) )})" )
__a : int = list(set(lowerCAmelCase__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : int = full_name.split('''/''' )
__a : Tuple = model
__a : Dict = []
for i, m_name in enumerate(lowerCAmelCase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__a : Union[str, Any] = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__a : Dict = getattr(lowerCAmelCase__ , '''encoder''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''layer''' )
__a : Any = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__a : Any = getattr(lowerCAmelCase__ , '''pooler''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__a : int = getattr(lowerCAmelCase__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''token_type_embeddings''' )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append('''weight''' )
__a : Tuple = getattr(lowerCAmelCase__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''attention''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : Any = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__a : Tuple = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''intermediate''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
__a : int = getattr(lowerCAmelCase__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__a : Dict = getattr(lowerCAmelCase__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''weight''' )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
__a : List[str] = '''.'''.join(lowerCAmelCase__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , lowerCAmelCase__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , lowerCAmelCase__ ):
__a : str = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__a : Optional[Any] = array.transpose()
if pointer.shape == array.shape:
__a : str = torch.from_numpy(lowerCAmelCase__ )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ):
# Instantiate model
logger.info(f"Loading model based on config from {config_path}..." )
__a : Dict = BertConfig.from_json_file(lowerCAmelCase__ )
__a : int = BertModel(lowerCAmelCase__ )
# Load weights from checkpoint
logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
lowercase__ =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 216 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ =logging.get_logger(__name__)
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ):
__a : List[str] = os.path.abspath(lowerCAmelCase__ )
logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__a : Tuple = tf.train.list_variables(lowerCAmelCase__ )
__a : Optional[Any] = []
__a : Union[str, Any] = []
__a : str = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__a : Any = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__a : Any = name[1:]
# figure out how many levels deep the name is
__a : List[Any] = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(lowerCAmelCase__ )
# read data
__a : Tuple = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
names.append('''/'''.join(lowerCAmelCase__ ) )
arrays.append(lowerCAmelCase__ )
logger.info(f"Read a total of {len(lowerCAmelCase__ ):,} layers" )
# Sanity check
if len(set(lowerCAmelCase__ ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(lowerCAmelCase__ ) )})" )
__a : int = list(set(lowerCAmelCase__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : int = full_name.split('''/''' )
__a : Tuple = model
__a : Dict = []
for i, m_name in enumerate(lowerCAmelCase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__a : Union[str, Any] = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__a : Dict = getattr(lowerCAmelCase__ , '''encoder''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''layer''' )
__a : Any = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__a : Any = getattr(lowerCAmelCase__ , '''pooler''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__a : int = getattr(lowerCAmelCase__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''token_type_embeddings''' )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append('''weight''' )
__a : Tuple = getattr(lowerCAmelCase__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''attention''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : Any = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__a : Tuple = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
                # output LayerNorm
trace.extend(['''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''value''' )
elif m_name == "_intermediate_dense":
                # intermediate (feed-forward) dense
trace.extend(['''intermediate''', '''dense'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''intermediate''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__a : Dict = getattr(lowerCAmelCase__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''weight''' )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
__a : List[str] = '''.'''.join(lowerCAmelCase__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , lowerCAmelCase__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , lowerCAmelCase__ ):
__a : str = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__a : Optional[Any] = array.transpose()
if pointer.shape == array.shape:
__a : str = torch.from_numpy(lowerCAmelCase__ )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ):
# Instantiate model
logger.info(f"Loading model based on config from {config_path}..." )
__a : Dict = BertConfig.from_json_file(lowerCAmelCase__ )
__a : int = BertModel(lowerCAmelCase__ )
# Load weights from checkpoint
logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
lowercase__ =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 216 | 1 |
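The conversion above hinges on a layout detail that is easy to get wrong: TensorFlow stores a dense kernel as (in_features, out_features), while a PyTorch nn.Linear stores its weight as (out_features, in_features), hence the transpose on every "kernel" variable. A minimal, self-contained sketch of that step (shapes and values are illustrative, not taken from any real checkpoint):
import numpy as np
import torch

# Hypothetical TF-style dense kernel: (in_features=4, out_features=3).
tf_kernel = np.arange(12, dtype=np.float32).reshape(4, 3)

linear = torch.nn.Linear(in_features=4, out_features=3, bias=False)

# PyTorch expects (out_features, in_features), so transpose before copying.
array = tf_kernel.transpose()
assert linear.weight.shape == array.shape, "shape mismatch after transpose"
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(array))

# x @ kernel in the TF convention equals linear(x) in the PyTorch convention.
x = torch.ones(1, 4)
expected = torch.from_numpy(np.ones((1, 4), dtype=np.float32) @ tf_kernel)
assert torch.allclose(linear(x), expected)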
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE :List[Any] = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
__SCREAMING_SNAKE_CASE :str = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
__SCREAMING_SNAKE_CASE :int = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Dict = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = BertTokenizer
def __init__( self : Union[str, Any] , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : Optional[int]=True , snake_case_ : Optional[Any]="[UNK]" , snake_case_ : List[str]="[SEP]" , snake_case_ : List[Any]="[PAD]" , snake_case_ : int="[CLS]" , snake_case_ : Dict="[MASK]" , snake_case_ : Any=True , snake_case_ : int=None , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case_ ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case_ ) != tokenize_chinese_chars
):
_UpperCAmelCase = getattr(snake_case_ , normalizer_state.pop("type" ) )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = strip_accents
_UpperCAmelCase = tokenize_chinese_chars
_UpperCAmelCase = normalizer_class(**snake_case_ )
_UpperCAmelCase = do_lower_case
def lowercase ( self : str , snake_case_ : str , snake_case_ : Any=None ):
_UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase ( self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : Any , snake_case_ : str , snake_case_ : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 156 |
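The two helper methods at the bottom of the tokenizer class implement BERT's sequence packing: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair, with segment ids 0 and 1. A deobfuscated sketch of the same logic on plain integer lists (the ids 101 and 102 are the usual BERT special-token ids but are hard-coded here only for illustration):
CLS, SEP = 101, 102  # assumed ids; real values come from the vocab

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b:
        out += ids_b + [SEP]
    return out

def create_token_type_ids(ids_a, ids_b=None):
    # Segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP].
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

assert build_inputs_with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert create_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]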
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : list ) -> list:
'''simple docstring'''
_UpperCAmelCase = False
    while is_sorted is False:  # keep looping until a full pass completes with no swaps
_UpperCAmelCase = True
for i in range(0 , len(__lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCAmelCase , _UpperCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCAmelCase = False
for i in range(1 , len(__lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCAmelCase , _UpperCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCAmelCase = False
return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
__SCREAMING_SNAKE_CASE :Optional[Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
__SCREAMING_SNAKE_CASE :List[str] = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 156 | 1 |
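Since the obfuscated names above make the control flow hard to follow, here is a runnable, deobfuscated rendering of the same brick-sort logic; the intent is behavioral equivalence (in-place sort, list returned):
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while not is_sorted:
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # even-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # odd-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list

assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]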
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(a , exist_ok=a )
__lowerCamelCase = os.path.join(a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(a , exist_ok=a )
__lowerCamelCase = os.path.join(a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE__ ( self : str , a : bool ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , a ) , )
return retriever
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(a , open(a , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever.retrieve(a , n_docs=a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(a )
__lowerCamelCase = RagRetriever.from_pretrained(a )
self.assertIsInstance(a , a )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(a , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=a )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever.retrieve(a , n_docs=a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a )
__lowerCamelCase = RagRetriever.from_pretrained(a )
self.assertIsInstance(a , a )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(a , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=a )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever.retrieve(a , n_docs=a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a )
__lowerCamelCase = RagRetriever.from_pretrained(a )
self.assertIsInstance(a , a )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(a , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever.retrieve(a , n_docs=a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a )
__lowerCamelCase = RagRetriever.from_pretrained(a )
self.assertIsInstance(a , a )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(a , a , prefix=retriever.config.generator.prefix , n_docs=a )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(a , a )
self.assertIsInstance(a , a )
self.assertIsInstance(a , np.ndarray )
__lowerCamelCase = retriever(
a , a , prefix=retriever.config.generator.prefix , n_docs=a , return_tensors='''pt''' , )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(a , torch.Tensor )
self.assertIsInstance(a , torch.Tensor )
self.assertIsInstance(a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=a )
retriever.set_ctx_encoder_tokenizer(a )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(a , a , prefix=retriever.config.generator.prefix , n_docs=a )
self.assertEqual(
            len(a ) , 6 )  # check that the retriever output consists of 6 keys, including the tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , a ) # check for doc token related keys in dictionary.
| 67 |
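The retrieval assertions above all reduce to one primitive: the document whose embedding has the largest inner product with the query comes back first. A minimal sketch of that primitive with a flat faiss index, assuming faiss and numpy are available (the vectors are toy data mirroring the test fixtures):
import faiss
import numpy as np

dim = 8
doc_embeds = np.stack([np.ones(dim), 2 * np.ones(dim)]).astype(np.float32)

index = faiss.IndexFlatIP(dim)  # exact (brute-force) inner-product search
index.add(doc_embeds)

queries = np.stack([np.ones(dim), -np.ones(dim)]).astype(np.float32)
scores, doc_ids = index.search(queries, 1)

# The all-ones query scores highest against doc 1 (the 2*ones vector);
# the all-negative query scores highest against doc 0 (least negative product).
assert doc_ids.tolist() == [[1], [0]]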
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = parent
_lowerCAmelCase = config_class
_lowerCAmelCase = has_text_modality
_lowerCAmelCase = kwargs
_lowerCAmelCase = common_properties
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
_lowerCAmelCase = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCAmelCase , _lowerCAmelCase ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCAmelCase ):
try:
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(
                    getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name}` value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCAmelCase ):
try:
_lowerCAmelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
                    getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , msg=f'''`{name}` value {idx} expected, but was {getattr(_lowerCAmelCase , _lowerCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
_lowerCAmelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = os.path.join(_lowerCAmelCase , "config.json" )
config_first.to_json_file(_lowerCAmelCase )
_lowerCAmelCase = self.config_class.from_json_file(_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _snake_case ( self ) -> str:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
_lowerCAmelCase = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
config_first.save_pretrained(_lowerCAmelCase )
_lowerCAmelCase = self.config_class.from_pretrained(_lowerCAmelCase , subfolder=_lowerCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_lowerCAmelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _snake_case ( self ) -> List[Any]:
if self.config_class.is_composition:
return
_lowerCAmelCase = self.config_class()
self.parent.assertIsNotNone(_lowerCAmelCase )
def _snake_case ( self ) -> str:
_lowerCAmelCase = copy.deepcopy(_lowerCAmelCase )
_lowerCAmelCase = self.config_class(**_lowerCAmelCase )
_lowerCAmelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCAmelCase , _lowerCAmelCase ) != value:
wrong_values.append((key, getattr(_lowerCAmelCase , _lowerCAmelCase ), value) )
if len(_lowerCAmelCase ) > 0:
_lowerCAmelCase = "\n".join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def _snake_case ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 158 | 0 |
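The mixin above ultimately enforces one invariant: serializing a config to JSON and loading it back must reproduce an identical dict. A library-free sketch of that round trip (the TinyConfig class is invented for illustration and stands in for any real config class):
import json
import os
import tempfile

class TinyConfig:
    def __init__(self, hidden_size=32, num_hidden_layers=2):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers

    def to_dict(self):
        return dict(self.__dict__)

    def to_json_file(self, path):
        with open(path, "w", encoding="utf-8") as f:
            json.dump(self.to_dict(), f)

    @classmethod
    def from_json_file(cls, path):
        with open(path, encoding="utf-8") as f:
            return cls(**json.load(f))

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "config.json")
    first = TinyConfig(hidden_size=64)
    first.to_json_file(path)
    second = TinyConfig.from_json_file(path)
    assert first.to_dict() == second.to_dict()  # round trip preserves everything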
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
# Initialise PyTorch model
a__: Union[str, Any] = MobileBertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F'Building PyTorch model from configuration: {config}' )
a__: List[Any] = MobileBertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
a__: Dict = load_tf_weights_in_mobilebert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
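Both conversion scripts in this document delegate the hard part to a load_tf_weights_in_* helper, whose core move is resolving a slash- or dot-separated checkpoint variable name into a nested module attribute with repeated getattr calls. A toy sketch of that walk (the module layout and variable name are invented):
import torch
from torch import nn

model = nn.Module()
model.embeddings = nn.Module()
model.embeddings.word_embeddings = nn.Embedding(10, 4)

def resolve(root: nn.Module, dotted_name: str):
    # Walk "embeddings.word_embeddings.weight" one attribute at a time.
    pointer = root
    for part in dotted_name.split("."):
        pointer = getattr(pointer, part)
    return pointer

param = resolve(model, "embeddings.word_embeddings.weight")
assert param.shape == (10, 4)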
| 203 |
"""simple docstring"""
from __future__ import annotations
class __snake_case :
def __init__( self , lowercase=None) -> Optional[Any]:
'''simple docstring'''
a__: int = data
a__: str = None
def __repr__( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = []
a__: Union[str, Any] = self
while temp:
string_rep.append(f'{temp.data}')
a__: Tuple = temp.next
return "->".join(lowercase)
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
if not elements_list:
raise Exception('The Elements List is empty' )
a__: Any = Node(elements_list[0] )
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
a__: Optional[Any] = Node(elements_list[i] )
a__: Tuple = current.next
return head
def __a ( _SCREAMING_SNAKE_CASE ) ->None:
if head_node is not None and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def __a ( ) ->Optional[Any]:
from doctest import testmod
testmod()
a__: Tuple = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(_SCREAMING_SNAKE_CASE )
print('Elements in Reverse:' )
print_reverse(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 203 | 1 |
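A runnable, deobfuscated rendering of the linked-list construction and recursive reverse printing above (intended to be behaviorally identical):
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

def make_linked_list(elements):
    if not elements:
        raise Exception("The Elements List is empty")
    head = Node(elements[0])
    current = head
    for value in elements[1:]:
        current.next = Node(value)
        current = current.next
    return head

def print_reverse(node):
    # Recurse to the tail first, then print on the way back out.
    if node is not None:
        print_reverse(node.next)
        print(node.data)

print_reverse(make_linked_list([14, 52, 14, 12, 43]))  # prints 43, 12, 14, 52, 14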
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _A ( UpperCamelCase_ : Any) -> List[Any]:
'''simple docstring'''
__lowercase = filter(lambda UpperCamelCase_: p.requires_grad, model.parameters())
__lowercase = sum([np.prod(p.size()) for p in model_parameters])
return params
_a = logging.getLogger(__name__)
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> Optional[int]:
'''simple docstring'''
if metric == "rouge2":
__lowercase = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
__lowercase = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
__lowercase = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function.")
__lowercase = ModelCheckpoint(
dirpath=UpperCamelCase_, filename=UpperCamelCase_, monitor=F"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : List[str]) -> int:
'''simple docstring'''
return EarlyStopping(
monitor=F"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=UpperCamelCase_, verbose=UpperCamelCase_, )
class _lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def _lowercase ( self : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : str ):
__lowercase = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCAmelCase__ )
@rank_zero_only
def _lowercase ( self : List[Any], UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : pl.LightningModule, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[int]=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__lowercase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
__lowercase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowercase = od / "test_results.txt"
__lowercase = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowercase = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
__lowercase = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=UpperCAmelCase__ )
generations_file.parent.mkdir(exist_ok=UpperCAmelCase__ )
with open(UpperCAmelCase__, "a+" ) as writer:
for key in sorted(UpperCAmelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowercase = metrics[key]
if isinstance(UpperCAmelCase__, torch.Tensor ):
__lowercase = val.item()
__lowercase = F"""{key}: {val:.6f}\n"""
writer.write(UpperCAmelCase__ )
if not save_generations:
return
if "preds" in metrics:
__lowercase = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(UpperCAmelCase__ )
@rank_zero_only
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
try:
__lowercase = pl_module.model.model.num_parameters()
except AttributeError:
__lowercase = pl_module.model.num_parameters()
__lowercase = count_trainable_parameters(UpperCAmelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def _lowercase ( self : Any, UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : pl.LightningModule ):
save_json(pl_module.metrics, pl_module.metrics_save_path )
return self._write_logs(UpperCAmelCase__, UpperCAmelCase__, "test" )
@rank_zero_only
def _lowercase ( self : int, UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : Optional[Any] ):
save_json(pl_module.metrics, pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 17 |
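The count_trainable_parameters helper at the top of the callbacks snippet reduces to a standard torch idiom; numel() is equivalent to the np.prod(p.size()) used there. A sketch with a toy model (the frozen-bias setup is illustrative):
import torch
from torch import nn

def count_trainable_parameters(model: nn.Module) -> int:
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

model = nn.Linear(10, 2)          # 10*2 weights + 2 biases = 22 parameters
model.bias.requires_grad_(False)  # freeze the bias
assert count_trainable_parameters(model) == 20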
"""simple docstring"""
from collections.abc import Sequence
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_))
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
__lowercase = 0.0
for coeff in reversed(UpperCamelCase_):
__lowercase = result * x + coeff
return result
if __name__ == "__main__":
_a = (0.0, 0.0, 5.0, 9.3, 7.0)
_a = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 1 |
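Horner's rule rewrites c0 + c1*x + ... + cn*x**n as (...((cn*x + c(n-1))*x + ...)*x + c0), so each coefficient costs one multiply and one add instead of an explicit power. A worked check of both evaluators above on the sample polynomial 5x^2 + 9.3x^3 + 7x^4 at x = 10, which should give 500 + 9300 + 70000 = 79800:
import math

def evaluate_poly(coeffs, x):
    return sum(c * x**i for i, c in enumerate(coeffs))

def horner(coeffs, x):
    result = 0.0
    for c in reversed(coeffs):
        result = result * x + c
    return result

poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients for 5x^2 + 9.3x^3 + 7x^4
assert math.isclose(evaluate_poly(poly, 10.0), 79800.0)
assert math.isclose(horner(poly, 10.0), 79800.0)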
'''simple docstring'''
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
__snake_case : Dict = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if exitstatus == 5:
__snake_case : List[Any] = 0
# Doctest custom flag to ignore output.
__UpperCamelCase = doctest.register_optionflag("IGNORE_RESULT")
__UpperCamelCase = doctest.OutputChecker
class _A ( __lowercase ):
def lowercase__ ( self : int , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Any ) -> List[Any]:
"""simple docstring"""
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __magic_name__ , __magic_name__ , __magic_name__ )
__UpperCamelCase = CustomOutputChecker
__UpperCamelCase = HfDoctestModule
__UpperCamelCase = HfDocTestParser
| 353 |
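The IGNORE_RESULT machinery above is a reusable doctest pattern: register a custom option flag, then short-circuit output comparison whenever an example carries it. A standalone sketch using only the standard library:
import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoringOutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # accept any output for examples marked +IGNORE_RESULT
        return super().check_output(want, got, optionflags)

# Inside a docstring this would look like:
#   >>> compute_something()  # doctest: +IGNORE_RESULT
#   'whatever was printed last time'
runner = doctest.DocTestRunner(checker=IgnoringOutputChecker())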
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 0 |
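The _LazyModule indirection above defers heavy submodule imports until an attribute is first accessed. Module-level __getattr__ (PEP 562) gives the same effect in a few lines; a sketch of a hypothetical package __init__.py (package and attribute names are invented):
# mypkg/__init__.py -- hypothetical package layout, for illustration only
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # called only when a normal module lookup fails (PEP 562)
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    return sorted(list(globals()) + list(_attr_to_module))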
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase :Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase :Dict = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = """blenderbot-small"""
A_ : int = ["""past_key_values"""]
A_ : int = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[int] , _A : Optional[Any]=50265 , _A : str=512 , _A : Optional[Any]=8 , _A : List[str]=2048 , _A : str=16 , _A : Tuple=8 , _A : str=2048 , _A : List[Any]=16 , _A : List[str]=0.0 , _A : Union[str, Any]=0.0 , _A : Union[str, Any]=True , _A : Tuple=True , _A : int="gelu" , _A : str=512 , _A : str=0.1 , _A : int=0.0 , _A : Dict=0.0 , _A : Union[str, Any]=0.02 , _A : Any=1 , _A : Dict=False , _A : List[Any]=0 , _A : Optional[int]=1 , _A : Tuple=2 , _A : int=2 , **_A : Dict , ) -> Dict:
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Union[str, Any] = max_position_embeddings
__magic_name__ : Any = d_model
__magic_name__ : int = encoder_ffn_dim
__magic_name__ : str = encoder_layers
__magic_name__ : str = encoder_attention_heads
__magic_name__ : Any = decoder_ffn_dim
__magic_name__ : Optional[int] = decoder_layers
__magic_name__ : Dict = decoder_attention_heads
__magic_name__ : List[str] = dropout
__magic_name__ : List[Any] = attention_dropout
__magic_name__ : Any = activation_dropout
__magic_name__ : Union[str, Any] = activation_function
__magic_name__ : int = init_std
__magic_name__ : Any = encoder_layerdrop
__magic_name__ : Tuple = decoder_layerdrop
__magic_name__ : Dict = use_cache
__magic_name__ : Any = encoder_layers
__magic_name__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__magic_name__ : int = {0: 'batch'}
__magic_name__ : Union[str, Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__magic_name__ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
__magic_name__ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__magic_name__ : int = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__magic_name__ , __magic_name__ : Any = self.num_layers
for i in range(_A ):
__magic_name__ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
__magic_name__ : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
else:
__magic_name__ : Tuple = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Dict = super().outputs
else:
__magic_name__ : int = super(_A , self ).outputs
if self.use_past:
__magic_name__ , __magic_name__ : str = self.num_layers
for i in range(_A ):
__magic_name__ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
__magic_name__ : Any = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            ) | 331 |
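# --- usage sketch (added; not part of the original file) ---
# A minimal driver for the dummy-input helpers above, assuming the class
# reconstructed here is BART's ONNX export config. The checkpoint name is an
# illustrative assumption only.
def _demo_dummy_inputs():
    from transformers import AutoTokenizer, BartConfig, TensorType

    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")  # assumed checkpoint
    onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
    dummy = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    )
    # Expected keys: input_ids, attention_mask, decoder_input_ids, decoder_attention_mask
    return sorted(dummy.keys())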
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 331 | 1 |
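# --- usage sketch (added): the integration check above, run outside unittest.
# Requires network access to download the "YituTech/conv-bert-base" weights.
def _demo_convbert():
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    return hidden.shape  # expected: (1, 6, 768)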
"""simple docstring"""
def valid_coloring(neighbours, colored_vertices, color):
    # A coloring is valid when no already-colored neighbour uses this color.
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
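# --- usage sketch (added): color a small adjacency-matrix graph with 3 colors.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # e.g. [0, 1, 0, 1, 0]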
| 354 | from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
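# expected output sketch (added): main() prints the completions of "de",
# i.e. ('depart ', 'detergent ', 'deer ', 'deal ') given the word tuple above.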
| 305 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
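# --- usage sketch (added): measure a small CPU workload with the helpers above.
if __name__ == "__main__":
    start = start_measure()
    _ = torch.randn(1024, 1024) @ torch.randn(1024, 1024)  # arbitrary workload
    log_measures(end_measure(start), "matmul demo")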
| 183 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
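    # --- usage sketch (added): a strictly diagonally dominant 3x3 system.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0, 0], iterations=25))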
| 107 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
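# --- usage sketch (added): the same extraction flow outside pytest. The archive
# name and cache directory are illustrative assumptions.
def _demo_extract(cache_dir="./demo_cache"):
    import gzip

    archive = "demo.txt.gz"
    with gzip.open(archive, "wt") as f:
        f.write(FILE_CONTENT)
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    return cached_path(archive, download_config=download_config)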
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co") | 270 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits, classical_bits):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
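# --- sanity sketch (added): with X applied to both qubits, every shot should
# land in state '11', e.g. {'11': 1000}.
def _check_all_ones():
    assert set(single_qubit_measure(2, 2)) == {"11"}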
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}") | 270 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
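# --- usage note (added): typical invocations from the repository root:
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies in place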
| 258 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
                [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
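# --- usage sketch (added): the same tiny checkpoint driven directly through
# `pipeline`, outside the test harness.
def _demo_video_classification():
    video_classifier = pipeline(
        "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
    )
    video = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
    return video_classifier(video, top_k=2)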
| 258 | 1 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
            [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
            [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
            [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
            [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
            [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
            [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
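# --- usage sketch (added): loading the bf16 UNet the helpers above rely on.
# Mirrors get_unet_model(fp16=True); downloads the checkpoint weights.
def _demo_load_unet():
    return FlaxUNet2DConditionModel.from_pretrained(
        "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.bfloat16, revision="bf16"
    )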
| 360 | """simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
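# --- usage sketch (added): score the term "processing" against a tiny corpus.
if __name__ == "__main__":
    document = "Natural language processing is fun. Processing text is useful."
    corpus = "processing text\nanother document\nno match here"
    tf = term_frequency("processing", document)           # 2
    df, n = document_frequency("processing", corpus)      # (1, 3)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # 0.954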
| 30 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> import torch\n        >>> import numpy as np\n\n        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n        >>> from transformers import pipeline\n        >>> from diffusers.utils import load_image\n\n\n        >>> def make_hint(image, depth_estimator):\n        ...     image = depth_estimator(image)["depth"]\n        ...     image = np.array(image)\n        ...     image = image[:, :, None]\n        ...     image = np.concatenate([image, image, image], axis=2)\n        ...     detected_map = torch.from_numpy(image).float() / 255.0\n        ...     hint = detected_map.permute(2, 0, 1)\n        ...     return hint\n\n\n        >>> depth_estimator = pipeline("depth-estimation")\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior = pipe_prior.to("cuda")\n\n        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n        ... )\n        >>> pipe = pipe.to("cuda")\n\n\n        >>> img = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/cat.png"\n        ... ).resize((768, 768))\n\n        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n        >>> prompt = "A robot, 4k photo"\n        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n        >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n        >>> image_emb, zero_image_emb = pipe_prior(\n        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n        ... ).to_tuple()\n\n        >>> images = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     hint=hint,\n        ...     num_inference_steps=50,\n        ...     generator=generator,\n        ...     height=768,\n        ...     width=768,\n        ... ).images\n\n        >>> images[0].save("robot_cat.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
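# quick check (added): with the default scale factor of 8, a 512x512 image maps
# to a 64x64 latent grid: downscale_height_and_width(512, 512) == (64, 64).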
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image) | 341 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
lowerCAmelCase__ = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
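    # --- usage sketch (added, as a comment): `pad` is normally reached through a
    # concrete subclass, e.g. (illustrative values):
    #   fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    #   batch = fe.pad({"input_values": [[0.1, 0.2], [0.3]]}, padding=True, return_tensors="np")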
def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature:
'''simple docstring'''
if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A__ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
A__ = processed_features[self.model_input_names[0]]
A__ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase ) == 0:
if return_attention_mask:
A__ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A__ = required_input[0]
if isinstance(lowercase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A__ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase ):
A__ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase ):
A__ = "tf"
elif is_torch_tensor(lowercase ):
A__ = "pt"
elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ):
A__ = "np"
else:
raise ValueError(
F'type of {first_element} unknown: {type(lowercase )}. '
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A__ = to_numpy(lowercase )
else:
A__ = [to_numpy(lowercase ) for v in value]
# Convert padding_strategy in PaddingStrategy
A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase )
A__ = processed_features[self.model_input_names[0]]
A__ = len(lowercase )
if not all(len(lowercase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
A__ = []
for i in range(lowercase ):
A__ = {k: v[i] for k, v in processed_features.items()}
# truncation
A__ = self._truncate(
lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , )
truncated_inputs.append(lowercase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A__ = PaddingStrategy.MAX_LENGTH
A__ = {}
for i in range(lowercase ):
# padding
A__ = self._pad(
truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
for key, value in outputs.items():
if key not in batch_outputs:
A__ = []
if value.dtype is np.dtype(np.floataa ):
A__ = value.astype(np.floataa )
batch_outputs[key].append(lowercase )
return BatchFeature(lowercase , tensor_type=lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict:
'''simple docstring'''
A__ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A__ = len(lowercase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A__ = np.ones(len(lowercase ) , dtype=np.intaa )
if needs_to_be_padded:
A__ = max_length - len(lowercase )
if self.padding_side == "right":
if return_attention_mask:
A__ = np.pad(
processed_features["attention_mask"] , (0, difference) )
A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A__ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A__ = np.pad(
processed_features["attention_mask"] , (difference, 0) )
A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A__ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
A__ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = len(lowercase ) > max_length
if needs_to_be_truncated:
A__ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A__ = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any:
'''simple docstring'''
if padding is not False:
if padding is True:
A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase , lowercase ):
A__ = PaddingStrategy(lowercase )
elif isinstance(lowercase , lowercase ):
A__ = padding
else:
A__ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
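

if __name__ == "__main__":
    # Minimal illustrative sketch (toy values, not part of the class above):
    # the numpy right-padding primitive that `_pad` relies on, padding the raw
    # features with `padding_value` and extending the attention mask with zeros.
    features = np.array([0.1, 0.2, 0.3], dtype=np.float32)
    attention_mask = np.ones(len(features), dtype=np.int32)
    max_length = 5
    difference = max_length - len(features)

    padded = np.pad(features, (0, difference), "constant", constant_values=0.0)
    mask = np.pad(attention_mask, (0, difference))

    assert padded.shape == (max_length,)
    assert mask.tolist() == [1, 1, 1, 0, 0]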
| 68 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    '''simple docstring'''

    def test_run_glue(self):
        '''simple docstring'''
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        '''simple docstring'''
        import xla_spawn

        testargs = """\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 359 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    '''simple docstring'''
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
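

if __name__ == "__main__":
    # Hedged usage sketch with toy rows; it assumes the public `datasets`
    # package exposes `Dataset` and `interleave_datasets` (aliased here to
    # avoid shadowing the function defined above).
    from datasets import Dataset as _Dataset
    from datasets import interleave_datasets as _interleave

    d1 = _Dataset.from_dict({"x": [0, 1, 2]})
    d2 = _Dataset.from_dict({"x": [10, 11, 12]})
    mixed = _interleave([d1, d2], stopping_strategy="first_exhausted")
    print(mixed["x"])  # examples alternate between d1 and d2: [0, 10, 1, 11, 2, 12]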
| 193 | 0 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: the function must change sign over [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
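    # Extra check (illustrative, not in the original script): the loop stops
    # once the bracket is narrower than 0.01, so the reported root should sit
    # within that tolerance of sqrt(10), the positive root of `equation`.
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01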
| 61 | """simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """simple docstring"""
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 197 | 0 |
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
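    # Extra property check (illustrative, not in the original module):
    # pigeonhole_sort sorts in place and must agree with Python's sorted().
    import random

    data = [random.randint(-50, 50) for _ in range(20)]
    expected = sorted(data)
    pigeonhole_sort(data)
    assert data == expected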
| 181 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ = get_tests_dir("""fixtures/dummy-config.json""")
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = 0
def UpperCAmelCase__ ( self :List[str] ) -> str:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
UpperCAmelCase = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> int:
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Any:
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
UpperCAmelCase = AutoConfig.for_model('roberta' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :str ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase = os.path.join(lowercase_ , 'fake-roberta' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with open(os.path.join(lowercase_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(type(lowercase_ ) , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Union[str, Any]:
try:
AutoConfig.register('custom' , lowercase_ )
# Wrong model type will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register('model' , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register('bert' , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCAmelCase__ ( self :str ) -> Dict:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase = AutoConfig.from_pretrained('bert-base' )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase__ ( self :List[str] ) -> str:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def UpperCAmelCase__ ( self :str ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = """new-model"""
try:
AutoConfig.register('new-model' , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
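

if __name__ == "__main__":
    # Hedged sketch of the register/round-trip pattern exercised above;
    # `ToyConfig` is an illustrative stand-in, not part of the test suite.
    from transformers import AutoConfig, PretrainedConfig

    class ToyConfig(PretrainedConfig):
        model_type = "toy-model"

    AutoConfig.register("toy-model", ToyConfig)
    config = AutoConfig.for_model("toy-model")
    assert isinstance(config, ToyConfig)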
| 181 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case_ ( self : int ):
__lowercase , __lowercase : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
__lowercase : int = '''A painting of a squirrel eating a burger'''
__lowercase : Optional[int] = jax.device_count()
__lowercase : Dict = num_samples * [prompt]
__lowercase : str = sd_pipe.prepare_inputs(_snake_case )
__lowercase : Optional[Any] = replicate(_snake_case )
__lowercase : str = shard(_snake_case )
__lowercase : List[Any] = jax.random.PRNGKey(0 )
__lowercase : List[Any] = jax.random.split(_snake_case , jax.device_count() )
__lowercase : List[Any] = sd_pipe(_snake_case , _snake_case , _snake_case , num_inference_steps=25 , jit=_snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase : int = images[0, 253:256, 253:256, -1]
__lowercase : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : Union[str, Any] = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case_ ( self : Dict ):
__lowercase : Optional[int] = '''stabilityai/stable-diffusion-2'''
__lowercase , __lowercase : Any = FlaxDPMSolverMultistepScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
__lowercase , __lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
_snake_case , scheduler=_snake_case , revision='''bf16''' , dtype=jnp.bfloataa , )
__lowercase : List[Any] = scheduler_params
__lowercase : Tuple = '''A painting of a squirrel eating a burger'''
__lowercase : Optional[Any] = jax.device_count()
__lowercase : Optional[Any] = num_samples * [prompt]
__lowercase : List[str] = sd_pipe.prepare_inputs(_snake_case )
__lowercase : Optional[Any] = replicate(_snake_case )
__lowercase : List[Any] = shard(_snake_case )
__lowercase : Any = jax.random.PRNGKey(0 )
__lowercase : str = jax.random.split(_snake_case , jax.device_count() )
__lowercase : Dict = sd_pipe(_snake_case , _snake_case , _snake_case , num_inference_steps=25 , jit=_snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase : Optional[int] = images[0, 253:256, 253:256, -1]
__lowercase : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : Tuple = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 156 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """simple docstring"""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 156 | 1 |
"""simple docstring"""
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    '''simple docstring'''
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    '''simple docstring'''
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
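
    # Extra illustrative check (not in the original module): verify the
    # rolling-hash identity the search loop relies on, against a hash
    # recomputed from scratch for each window.
    def window_hash(s: str) -> int:
        h = 0
        for ch in s:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    demo_text = "abcdefgh"
    window = 3
    power = pow(alphabet_size, window - 1, modulus)
    h = window_hash(demo_text[:window])
    for i in range(len(demo_text) - window):
        h = ((h - ord(demo_text[i]) * power) * alphabet_size + ord(demo_text[i + window])) % modulus
        assert h == window_hash(demo_text[i + 1 : i + 1 + window])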
| 326 |
"""simple docstring"""
def multiplication_table(number: int, number_of_terms: int) -> str:
    '''simple docstring'''
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
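    # Quick sanity check (illustrative): first and last lines of a small table.
    table = multiplication_table(number=3, number_of_terms=4)
    assert table.splitlines()[0] == "3 * 1 = 3"
    assert table.splitlines()[-1] == "3 * 4 = 12"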
| 326 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__snake_case = False
@skip_mps
class _lowerCAmelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = StableDiffusionAttendAndExcitePipeline
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
__UpperCAmelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase ( cls ) -> List[str]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
@classmethod
def lowerCamelCase ( cls ) -> Union[str, Any]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
snake_case : Optional[int] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
snake_case : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
snake_case : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
snake_case : Dict = CLIPTextModel(UpperCamelCase__ )
snake_case : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> str:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("mps" ):
snake_case : int = torch.manual_seed(UpperCamelCase__ )
else:
snake_case : List[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
snake_case : Optional[Any] = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = "cpu"
snake_case : List[str] = self.get_dummy_components()
snake_case : Union[str, Any] = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Any = self.get_dummy_inputs(UpperCamelCase__ )
snake_case : Dict = pipe(**UpperCamelCase__ ).images
snake_case : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
snake_case : Any = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
snake_case : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls ) -> List[Any]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
@classmethod
def lowerCamelCase ( cls ) -> str:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : int = torch.manual_seed(51 )
snake_case : Any = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to("cuda" )
snake_case : Union[str, Any] = "a painting of an elephant with glasses"
snake_case : Optional[Any] = [5, 7]
snake_case : int = pipe(
prompt=UpperCamelCase__ , token_indices=UpperCamelCase__ , guidance_scale=7.5 , generator=UpperCamelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
snake_case : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 203 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    """simple docstring"""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowerCAmelCase ( snake_case_ ):
@classmethod
def lowerCamelCase ( cls ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = tempfile.mkdtemp()
snake_case : int = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
snake_case : Union[str, Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def lowerCamelCase ( cls ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : str = self.get_auto_remove_tmp_dir()
snake_case : Dict = F'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
snake_case : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Dict = self.get_auto_remove_tmp_dir()
snake_case : Dict = F'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case : Any = get_results(UpperCamelCase__ )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case : str = F'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
snake_case : Dict = get_results(UpperCamelCase__ )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = 7 if get_gpu_count() > 1 else 2
snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
snake_case : List[str] = F'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
snake_case : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[Any] = self.get_auto_remove_tmp_dir()
snake_case : Dict = F'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
snake_case : Dict = get_results(UpperCamelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Tuple = self.get_auto_remove_tmp_dir()
snake_case : List[str] = F'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
snake_case : Optional[Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Tuple = self.get_auto_remove_tmp_dir()
snake_case : Dict = F'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
snake_case : Any = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = self.get_auto_remove_tmp_dir()
snake_case : int = F'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
snake_case : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "translation_no_trainer" ) ) )
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : str = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase__ )
snake_case : Dict = self.get_auto_remove_tmp_dir()
snake_case : Dict = F'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
snake_case : Optional[Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.get_auto_remove_tmp_dir()
snake_case : str = F'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
snake_case : str = get_results(UpperCamelCase__ )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "image_classification_no_trainer" ) ) )
| 203 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """simple docstring"""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
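

# Illustrative check (not part of the original solution): the set is needed
# because different (a, b) pairs can collide, e.g. 2**4 == 4**2 == 16, so for
# n = 5 the 4 * 4 = 16 pairs yield only 15 distinct powers.
assert solution(5) == 15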
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 25 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
    def copy(self):
        '''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 25 | 1 |
from __future__ import annotations
def CeilIndex(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def LongestIncreasingSubsequenceLength(v) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
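    # Example run (illustrative input): one longest strictly increasing
    # subsequence of this list is [2, 3, 7, 8, 10, 13], so the length is 6.
    v = [2, 5, 3, 7, 11, 8, 10, 13, 6]
    print(LongestIncreasingSubsequenceLength(v))  # -> 6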
| 20 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = create_model(
"HTSAT-tiny" , "roberta" , _UpperCAmelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_UpperCAmelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split the fused qkv matrix into separate query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
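# A small illustration of the renaming (hypothetical checkpoint key, for intuition only):
# an audio-branch key such as "audio_branch.blocks.0.attn.qkv.weight" is first mapped to
# "audio_model.audio_encoder.blocks.0.attention.self.qkv.weight" via KEYS_TO_MODIFY_MAPPING,
# and its fused qkv weight is then split into separate query/key/value tensors.
def _rename_demo() -> None:
    fused = torch.arange(12.0).reshape(6, 2)  # a toy 6x2 "qkv" weight, so qkv_dim = 2
    renamed = rename_state_dict({"audio_branch.blocks.0.attn.qkv.weight": fused})
    assert "audio_model.audio_encoder.blocks.0.attention.self.query.weight" in renamed
    assert renamed["audio_model.audio_encoder.blocks.0.attention.self.key.weight"].shape == (2, 2)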
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 13 | 0 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
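        # Worked example with the defaults above (an illustrative note, not original code):
        # num_patches = (30 // 2) ** 2 = 225 and mask_ratio = 0.6, so the visible
        # sequence length is ceil(0.4 * (225 + 1)) = ceil(90.4) = 91 tokens.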
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the random mask reproducible across frameworks
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
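        # Shape sanity check for the assertions above (an explanatory note, not original code):
        # facebook/vit-mae-base uses 224x224 inputs and 16x16 patches, so there are
        # (224 // 16) ** 2 = 196 patches, and the decoder reconstructs 16 * 16 * 3 = 768
        # pixel values per patch — hence logits of shape (1, 196, 768).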
| 359 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 180 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are yielded sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated sequences in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code (everything from the final end-of-function string onwards)."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset and gather them across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 5 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 305 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel spectrogram images and converts them back to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of denoising steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(image, mode="RGB").convert("L") for image in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising loop step by step to recover the noise for given images (DDIM only)."""
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
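# A minimal sketch of the slerp endpoints (hypothetical vectors, for intuition only):
# at alpha=0 the interpolation returns x0, at alpha=1 it returns x1.
def _slerp_demo() -> None:
    x0 = torch.tensor([1.0, 0.0])
    x1 = torch.tensor([0.0, 1.0])
    assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 0.0), x0, atol=1e-6)
    assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 1.0), x1, atol=1e-6)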
| 359 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)
| 44 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
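# Example of the resulting invocation (hypothetical flag values, for illustration only):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -U pip" --command "python train.py" --debug
# would print a gcloud command of the form:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install -U pip; python train.py" --worker all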
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
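# Illustration (explanatory note, not original code): with the lazy module installed in
# sys.modules, `from transformers.models.resnet import ResNetModel` only imports the heavy
# torch-backed submodule on first attribute access, keeping `import transformers` cheap,
# while the try/except blocks above simply skip backends whose dependencies are missing.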
| 270 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are given in ascending order: coefficients[i] multiplies x^i."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree: int = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        """Add two polynomials, padding the lower-degree one with zero coefficients."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at x = substitution."""
        result: float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        """Return the derivative of the polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: float = 0) -> Polynomial:
        """Return the antiderivative of the polynomial, with integration constant `constant`."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
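# A small usage sketch (hypothetical values, not part of the original module):
if __name__ == "__main__":
    p = Polynomial(1, [1, 2])  # 2x + 1
    q = Polynomial(2, [0, 0, 3])  # 3x^2
    print(p + q)  # 3x^2 + 2x + 1
    print(p.derivative())  # 2
    print(p.evaluate(2))  # 1 + 2*2 = 5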
| 363 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
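# For example (illustrative): string_to_bool("Yes") and string_to_bool("1") return
# True, string_to_bool("no") returns False, and anything else raises ArgumentTypeError.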
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Creates a mapping function from each choice's string representation to the actual value. Used to support multiple
    value types for a single argument.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
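# Illustrative use of the field helper above inside a dataclass (the names here
# are assumptions, not part of this module):
#
#     @dataclasses.dataclass
#     class TrainingConfig:
#         learning_rate: float = HfArg(default=1e-4, aliases=["--lr"], help="Learning rate.")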
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """
        Args:
            dataclass_types: Dataclass type, or list of dataclass types, whose instances will be filled with the
                parsed args.
            kwargs: (Optional) Passed to `argparse.ArgumentParser()` in the regular way.
        """
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        """Parse command-line args into instances of the specified dataclass types."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
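# A minimal end-to-end sketch of the parser above (illustrative; the dataclass
# and argument values are assumptions, not part of this module):
#
#     @dataclasses.dataclass
#     class ExampleArguments:
#         model_name: str = "bert-base-uncased"
#         fp16: bool = False
#
#     parser = HfArgumentParser(ExampleArguments)
#     (example_args,) = parser.parse_args_into_dataclasses(["--model_name", "gpt2", "--fp16", "true"])
#     # example_args.model_name == "gpt2" and example_args.fp16 is True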
| 160 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
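# The class below exposes the files of a Hugging Face dataset repository as a
# read-only fsspec filesystem. A minimal usage sketch (illustrative; `repo_info`
# is assumed to be a DatasetInfo fetched from the Hub API):
#
#     fs = HfFileSystem(repo_info=repo_info, token=None)
#     fs.ls("")                  # top-level files and directories
#     fs.open("data/train.csv")  # stream a file from the repo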
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        # Build a flat {path: info} cache from the repo's sibling files, adding
        # an entry for every parent directory along the way.
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},  # Enable reading proxy env variables.
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 262 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
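# Note on the pattern above (illustrative): `_LazyModule` defers the heavy imports
# until an attribute is first accessed, so for example
# `from transformers.models.rembert import RemBertConfig` only triggers the
# configuration import, not the torch/TF modeling modules.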
| 30 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have fewer false positives than zipfile.is_zipfile,
    # which we achieve by checking only the magic number.
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
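# Minimal sketch of the API the tests above exercise (illustrative; the archive
# path is an assumption):
#
#     extractor_format = Extractor.infer_extractor_format("archive.tar.gz")
#     if extractor_format is not None:
#         Extractor.extract("archive.tar.gz", extractor_format, "output_dir")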
| 143 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __UpperCamelCase :
def __init__( self , __a , __a=2 , __a=True , __a=False , __a=10 , __a=3 , __a=32 * 8 , __a=32 * 8 , __a=4 , __a=64 , ):
'''simple docstring'''
__a : Any = parent
__a : Any = batch_size
__a : Tuple = is_training
__a : Optional[int] = use_auxiliary_loss
__a : List[Any] = num_queries
__a : Any = num_channels
__a : str = min_size
__a : str = max_size
__a : Any = num_labels
__a : Union[str, Any] = hidden_dim
__a : Optional[Any] = hidden_dim
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__a )
__a : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__a )
__a : Optional[int] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__a ) > 0.5
).float()
__a : Tuple = (torch.rand((self.batch_size, self.num_labels) , device=__a ) > 0.5).long()
__a : Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__a : Dict = self.num_queries
__a : List[str] = self.num_labels
__a : List[str] = [1, 1, 1, 1]
__a : Any = self.num_channels
__a : int = 64
__a : Optional[int] = 128
__a : str = self.hidden_dim
__a : int = self.hidden_dim
__a : List[Any] = self.hidden_dim
return config
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a , __a , __a , __a : int = self.prepare_config_and_inputs()
__a : List[Any] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : Tuple = output.encoder_hidden_states
__a : Dict = output.pixel_decoder_hidden_states
__a : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__a ) , config.decoder_layers )
def __UpperCAmelCase ( self , __a , __a , __a , __a=False ):
'''simple docstring'''
with torch.no_grad():
__a : List[str] = MaskaFormerModel(config=__a )
model.to(__a )
model.eval()
__a : List[str] = model(pixel_values=__a , pixel_mask=__a )
__a : Optional[int] = model(__a , output_hidden_states=__a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__a , __a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = MaskaFormerForUniversalSegmentation(config=__a )
model.to(__a )
model.eval()
def comm_check_on_output(__a ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__a : Union[str, Any] = model(pixel_values=__a , pixel_mask=__a )
__a : Optional[Any] = model(__a )
comm_check_on_output(__a )
__a : Dict = model(
pixel_values=__a , pixel_mask=__a , mask_labels=__a , class_labels=__a )
comm_check_on_output(__a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
A_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = MaskaFormerModelTester(self )
__a : str = ConfigTester(self , config_class=__a , has_text_modality=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__a , **__a , output_hidden_states=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = model_class(__a )
__a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Tuple = [*signature.parameters.keys()]
__a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__a : Optional[Any] = MaskaFormerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = (self.model_tester.min_size,) * 2
__a : Optional[int] = {
'pixel_values': torch.randn((2, 3, *size) , device=__a ),
'mask_labels': torch.randn((2, 10, *size) , device=__a ),
'class_labels': torch.zeros(2 , 10 , device=__a ).long(),
}
__a : Dict = self.model_tester.get_config()
__a : List[Any] = MaskaFormerForUniversalSegmentation(__a ).to(__a )
__a : List[str] = model(**__a )
self.assertTrue(outputs.loss is not None )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__a , **__a , output_hidden_states=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = model_class(__a ).to(__a )
__a : List[str] = model(**__a , output_attentions=__a )
self.assertTrue(outputs.attentions is not None )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__a : List[str] = self.all_model_classes[1]
__a , __a , __a , __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs()
__a : List[str] = model_class(__a )
model.to(__a )
model.train()
__a : Union[str, Any] = model(__a , mask_labels=__a , class_labels=__a ).loss
loss.backward()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.all_model_classes[1]
__a , __a , __a , __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs()
__a : List[Any] = True
__a : Optional[int] = True
__a : Optional[int] = model_class(__a ).to(__a )
model.train()
__a : Optional[int] = model(__a , mask_labels=__a , class_labels=__a )
__a : Dict = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__a : List[str] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__a : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__a : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowercase : Any = 1E-4
def lowerCamelCase ():
__a : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__a )
__a : List[Any] = self.default_image_processor
__a : Dict = prepare_img()
__a : Union[str, Any] = image_processor(__a , return_tensors='pt' ).to(__a )
__a : int = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__a , (1, 3, 384, 384) )
with torch.no_grad():
__a : List[str] = model(**__a )
__a : str = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a ) )
__a : Optional[int] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a ) )
__a : Dict = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __a , atol=__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__a ).eval()
__a : List[Any] = self.default_image_processor
__a : Optional[Any] = prepare_img()
__a : Optional[int] = image_processor(__a , return_tensors='pt' ).to(__a )
__a : Optional[int] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__a , (1, 3, 384, 384) )
with torch.no_grad():
__a : Union[str, Any] = model(**__a )
# masks_queries_logits
__a : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__a : List[str] = torch.tensor(__a ).to(__a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __a , atol=__a ) )
# class_queries_logits
__a : Optional[Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__a : Tuple = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __a , atol=__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__a ).eval()
__a : Optional[int] = self.default_image_processor
__a : Tuple = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
__a : Any = inputs['pixel_values'].to(__a )
__a : int = [el.to(__a ) for el in inputs['mask_labels']]
__a : List[Any] = [el.to(__a ) for el in inputs['class_labels']]
with torch.no_grad():
__a : Union[str, Any] = model(**__a )
self.assertTrue(outputs.loss is not None )
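# Sketch of the inference flow the integration tests above exercise
# (illustrative; only the checkpoint name is taken from the tests):
#
#     processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#     model = MaskaFormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # masks_queries_logits: (batch, num_queries, H/4, W/4)
#     # class_queries_logits: (batch, num_queries, num_labels + 1)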
| 27 |
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
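# Note (illustrative): the expected_slice values above pin a deterministic output
# for seed 0; they only hold for the exact weights and scheduler revision the
# test was recorded against.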
| 193 | 0 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """A callback that also evaluates on the train set at each epoch end, so train and validation metrics can be compared."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
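# Run sketch (illustrative; the script filename is an assumption):
#
#     python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5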
| 9 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6):
set_seed(4_2)
__lowerCAmelCase = RegressionModel()
__lowerCAmelCase = deepcopy(lowerCamelCase)
__lowerCAmelCase = RegressionDataset(length=lowerCamelCase)
__lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase)
model.to(accelerator.device)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return model, ddp_model, dataloader
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
__lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
__lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''')
def tokenize_function(lowerCamelCase):
__lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase)
return outputs
with accelerator.main_process_first():
__lowerCAmelCase = dataset.map(
lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
__lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''')
def collate_fn(lowerCamelCase):
if use_longest:
return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''')
return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''')
return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase)
__lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = []
for batch in dataloader:
__lowerCAmelCase , __lowerCAmelCase = batch.values()
with torch.no_grad():
__lowerCAmelCase = model(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
__lowerCAmelCase , __lowerCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase)
targs.append(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase)
return logits, targs
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase)
assert (
len(lowerCamelCase) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}"""
def __magic_name__( lowerCamelCase = False, lowerCamelCase = False):
__lowerCAmelCase = evaluate.load('''glue''', '''mrpc''')
__lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase)
# First do baseline
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no''']
model.to(lowerCamelCase)
model.eval()
for batch in dataloader:
batch.to(lowerCamelCase)
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=lowerCamelCase, references=batch['''labels'''])
__lowerCAmelCase = metric.compute()
# Then do distributed
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
__lowerCAmelCase = batch['''labels''']
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase)
__lowerCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__( ):
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
test_mrpc(lowerCamelCase, lowerCamelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
test_torch_metrics(lowerCamelCase, 9_9)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''')
__lowerCAmelCase = Accelerator()
test_torch_metrics(lowerCamelCase, 5_1_2)
accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
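# Run sketch (illustrative; the script filename is an assumption): this module is
# meant to be launched on multiple processes, e.g.
#
#     accelerate launch test_metrics.py
#
# `gather_for_metrics` then collects predictions and labels from all processes
# and drops the duplicated samples of the last uneven batch.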
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Check whether a queen can be placed at board[row][column] without being
    attacked by any queen already on the board.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking when no column is safe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
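# Note: for n = 8 the backtracking search above prints every placement and the
# final count is 92, the known number of distinct 8-queens solutions.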
| 181 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( __a ):
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Any=7 , _A : str=True , _A : Any=True , _A : Any=True , _A : Optional[int]=True , _A : int=99 , _A : Optional[int]=32 , _A : List[Any]=5 , _A : Optional[Any]=4 , _A : Dict=37 , _A : Any="gelu" , _A : str=0.1 , _A : int=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=16 , _A : List[Any]=2 , _A : str=0.0_2 , _A : Optional[Any]=False , _A : Any=True , _A : Dict="None" , _A : List[str]=3 , _A : List[str]=4 , _A : Tuple=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : Optional[Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Any = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : List[Any] = relative_attention
UpperCAmelCase__ : int = position_biased_input
UpperCAmelCase__ : str = pos_att_type
UpperCAmelCase__ : Union[str, Any] = scope
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase_ ( self : Dict , _A : Optional[int] ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase_ ( self : int , _A : int , _A : Any , _A : Tuple , _A : List[Any] , _A : str , _A : Union[str, Any] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DebertaVaModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A )[0]
UpperCAmelCase__ : List[str] = model(_A , token_type_ids=_A )[0]
UpperCAmelCase__ : List[Any] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : List[Any] , _A : Optional[Any] , _A : int , _A : List[Any] , _A : Optional[int] , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = DebertaVaForMaskedLM(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DebertaVaModel,
            'fill-mask': DebertaVaForMaskedLM,
            'question-answering': DebertaVaForQuestionAnswering,
            'text-classification': DebertaVaForSequenceClassification,
            'token-classification': DebertaVaForTokenClassification,
            'zero-shot': DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_deberta_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_deberta_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_deberta_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_deberta_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    def test_deberta_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='''Model not available yet''')
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''')
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"""{output[:, 1:4, 1:4]}""")
| 181 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    '''simple docstring'''
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )
    def read(self):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="""train""")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split="""train""", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
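# A hypothetical usage sketch, added for illustration (not part of the original
# module). It assumes the public `datasets` API and that the class above is
# importable as written; the generator yields plain dict rows.
#
#   def gen():
#       for i in range(3):
#           yield {"id": i}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()
#   # expect a map-style Dataset with 3 rows (streaming defaults to False)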
| 363 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
        image_processor_map = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48145466, 0.4578275, 0.40821073],
            """image_std""": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, """w""", encoding="""utf-8""") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["""input_ids""", """attention_mask""", """pixel_values"""])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """conditional_pixel_values"""])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
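    # A hypothetical usage sketch, added for illustration (not part of the
    # original test file): the processor bundles the tokenizer and the image
    # processor, so text and images can be prepared in one call.
    #
    #   processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
    #   batch = processor(text=["lower newer"], images=image_inputs, return_tensors="np")
    #   # batch keys: input_ids, attention_mask, pixel_values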
| 136 | 0 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
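# Added note (not part of the original file): the update in the loop above is a
# rolling hash. Treating each window of length m as a base-B number, with
# B = alphabet_size and modulus M,
#   hash(text[i + 1 : i + m + 1])
#       = ((hash(text[i : i + m]) - ord(text[i]) * B**(m - 1)) * B
#          + ord(text[i + m])) % M
# so sliding the window costs O(1) instead of rehashing all m characters;
# modulus_power holds B**(m - 1) % M, precomputed in the first loop.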
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 326 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
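# Added note (not part of the original file): the acceptance rule above is the
# Metropolis criterion. A worsening move with score change d < 0 is accepted
# with probability e**(d / T); at T = 100 a d = -1 move passes ~99% of the
# time (e**-0.01 ~ 0.990), while at T = 1 it passes only ~37% (e**-1 ~ 0.368).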
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F'''{local_min.score()}'''
    )
| 326 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision='no', use_port='29500'):
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.')
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME', None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp
        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initializes an '
                '`Accelerator`.')
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type='TPU')
        print(f'Launching a training on {num_processes} TPU cores.')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='fork')
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('Launching training on one GPU.')
        else:
            print('Launching training on one CPU.')
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.')
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException
            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initializes an '
                    '`Accelerator`.')
            if torch.cuda.is_initialized():
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
                    'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
                    'function.')
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='127.0.01', master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type='MULTI_GPU')
                print(f'Launching training on {num_processes} GPUs.')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
                            'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
                            'Please review your imports and test them when running the `notebook_launcher()` to identify '
                            'which one is problematic.') from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
                print('Launching training on MPS.')
            elif torch.cuda.is_available():
                print('Launching training on one GPU.')
            else:
                print('Launching training on CPU.')
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr='127.0.01', master_port='29500', accelerate_mixed_precision='no', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='yes', ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
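# A hypothetical usage sketch, added for illustration (not part of the original
# module); `train` and its argument are placeholders.
#
#   def train(learning_rate):
#       ...  # build the Accelerator and the training loop *inside* this function
#
#   notebook_launcher(train, args=(1e-4,), num_processes=2)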
| 354 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
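# Added sanity checks (not part of the original file); the expected values are
# easy to verify by hand: 26 = 16 + 10 -> "0x1a" and 256 = 16**2 -> "0x100".
assert decimal_to_hexadecimal(26) == '0x1a'
assert decimal_to_hexadecimal(-256) == '-0x100'
# Edge case of this implementation: 0 yields '0x' with no digits, because the
# while-loop body never runs.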
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """simple docstring"""
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    """simple docstring"""
    def test_kwargs_handler(self):
        """simple docstring"""
        # `to_kwargs()` returns only the fields that differ from the defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"""a""": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"""a""": 2, """b""": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"""a""": 2, """c""": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        """simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="""fp16""", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        """simple docstring"""
        cmd = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
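# Added note (not part of the original file): the `__main__` block above is what
# `test_ddp_kwargs` runs under `torchrun`; after `accelerator.prepare`, the
# DDP-wrapped model exposes the handler's settings (bucket_cap_mb surfaces as
# `bucket_bytes_cap` in bytes, hence the division by 1024 * 1024).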
| 25 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components(self):
        """simple docstring"""
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def test_save_load_optional_components(self):
        """simple docstring"""
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        """simple docstring"""
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        """simple docstring"""
        # if
        pipe_1 = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""", variant="""fp16""", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""", variant="""fp16""", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("""cuda""")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("""anime turtle""", device="""cuda""")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
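# Added note (not part of the original file): DeepFloyd IF is a two-stage
# cascade. Stage I generates at 64x64 and stage II super-resolves to 256x256,
# which is why each helper above asserts a (64, 64, 3) image first and a
# (256, 256, 3) image second.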
| 25 | 1 |
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]
    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index
    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
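# Added note (not part of the original file): `lowlink_of[v] == index_of[v]`
# marks v as the root of a strongly connected component; everything popped off
# the stack down to v belongs to that component. Components are appended in
# reverse topological order of the condensation graph, which the test below
# relies on.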
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    '''simple docstring'''
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 359 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["""pixel_values"""]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''')
        output_size = (size['''height'''], size['''width'''])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)
        return encoded_outputs
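# A hypothetical usage sketch, added for illustration (not part of the original
# module); it assumes PIL and numpy are available and relies on the 384x384
# default size set in __init__.
#
#   processor = BlipImageProcessor()
#   image = PIL.Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   # batch["pixel_values"].shape is expected to be (1, 3, 384, 384)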
| 78 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'''SPARK_PARTITION_ID() = {part_id}''').collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master('local[*]').appName('pyspark').getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
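# Added note (not part of the original file): 100 rows x 8 bytes = 800 bytes in
# total, so a 16-byte max_shard_size yields 800 / 16 = 50 partitions of 2 rows.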
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master('local[*]').appName('pyspark').getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master('local[*]').appName('pyspark').getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master('local[*]').appName('pyspark').getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('numpy.random.Generator') as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master('local[*]').appName('pyspark').getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master('local[*]').appName('pyspark').getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 96 |
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        phonemes = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
        ).split(""" """)
        vocab_tokens = dict(zip(phonemes, range(len(phonemes))))
        self.special_tokens_map = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + """ """
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = """ """ + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
_A = tokenizer("""m xxx ɪ""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
_A = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
_A = tokenizer("""maɪ c""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [3, 2_00] ) # mai should be <unk> (=3)
def UpperCAmelCase ( self ) -> int:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_A = tokenizer.decode(sample_ids[0] )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def UpperCAmelCase ( self ) -> str:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_A = tokenizer.decode(sample_ids[0] )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_A = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def UpperCAmelCase ( self ) -> Dict:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowerCAmelCase_ )
_A = """Hello how are you"""
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""en-us""" ).input_ids
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowerCAmelCase_ , """ɛ l o h aʊ a ʁ j u""" )
def UpperCAmelCase ( self ) -> Any:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how Are you"""
_A = """hello how are you"""
_A = tokenizer(lowerCAmelCase_ ).input_ids
_A = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def UpperCAmelCase ( offsets , key ) -> List[str]:
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_A = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_A = tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(outputs , WavaVecaPhonemeCTCTokenizerOutput ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(outputs_batch , outputs_list ):
self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
# transform list to ModelOutput
outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(la , lb ):
if isinstance(la , list ):
[recursive_check(sub_la , sub_lb ) for sub_la, sub_lb in zip(la , lb )]
self.assertEqual(la , lb )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_A = tokenizer.batch_decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ )
_A = [tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ ) for ids in sample_ids]
check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def UpperCAmelCase ( self ) -> int:
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_A = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_A = tokenizer.add_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size + len(lowerCAmelCase_ ) )
_A = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_A = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_A = tokenizer.add_special_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size_a + len(lowerCAmelCase_ ) )
_A = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> str:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_A = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_A = tokenizer.convert_tokens_to_string(lowerCAmelCase_ )
self.assertIsInstance(output["""text"""] , lowerCAmelCase_ )
| 180 | 0 |
import re
def __A ( dna ):
'''Return the complementary strand of a DNA sequence (A<->T, C<->G).'''
if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
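# Example: __A("ATCG") -> "TAGC"; each base is swapped for its Watson-Crick complement.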
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_A = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_A = tempfile.mkdtemp()
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , __A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
# load decoder from hub
_A = '''hf-internal-testing/ngram-beam-search-decoder'''
def __A ( self: Tuple , **__A: str ) -> str:
_A = self.add_kwargs_tokens_map.copy()
kwargs.update(__A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: Any , **__A: List[Any] ) -> Union[str, Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A )
def __A ( self: List[Any] , **__A: Union[str, Any] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A )
def __A ( self: List[str] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __A ( self: List[str] ) -> Optional[Any]:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self: str ) -> Any:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self: List[str] ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = floats_list((3, 10_00) )
_A = feature_extractor(__A , return_tensors='''np''' )
_A = processor(__A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self: Union[str, Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = '''This is a test string'''
_A = processor(text=__A )
_A = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
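# Fixed-seed dummy logits keep the decoder tests below deterministic across runs.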
def __A ( self: List[str] , shape=(2, 10, 16) , seed=77 ) -> List[Any]:
np.random.seed(seed )
return np.random.rand(*shape )
def __A ( self: List[Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(__A )
_A = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self: str , __A: Any ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_A = processor.batch_decode(__A , __A )
_A = list(__A )
with get_context('''fork''' ).Pool() as p:
_A = decoder.decode_beams_batch(__A , __A )
texts_decoder ,logit_scores_decoder ,lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
def __A ( self: Optional[Any] ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = decoded_processor_out.text
_A = list(__A )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) )
def __A ( self: Optional[int] ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
_A = decoded_processor_out.text
_A = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __A )
def __A ( self: int ) -> Optional[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def __A ( self: Tuple ) -> Any:
_A = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_A = WavaVecaProcessorWithLM.from_pretrained(__A )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__A , __A )
def __A ( self: List[str] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(__A , return_tensors='''np''' )
_A = processor_auto(__A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(__A )
_A = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self: Optional[int] ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __A ( offsets , key ) -> Union[str, Any]:
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def __A ( self: Optional[Any] ) -> int:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()[0]
_A = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self: Optional[Any] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()
_A = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self: Optional[Any] ) -> Optional[Any]:
import torch
_A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A )
_A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(__A )
_A = next(__A )
_A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_A = model(__A ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=__A )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A )
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) )
_A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) )
# fmt: off
_A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
| 75 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
a_ : str = """2020.9.26"""
a_ : Tuple = """xcodz-dot, cclaus, dhruvmanila"""
def convert_to_ad ( x : float , y : float , z : float , scale : float , distance : float ) -> tuple[float, float]:
'''Project a 3d point to a 2d drawable point using simple perspective projection.'''
if not all(isinstance(val , (float, int) ) for val in locals().values() ):
msg =F'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(msg )
projected_x =((x * distance) / (z + distance)) * scale
projected_y =((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def rotate ( x : float , y : float , z : float , axis : str , angle : float ) -> tuple[float, float, float]:
'''Rotate a 3d point about the given axis ('x', 'y' or 'z') by `angle`.'''
if not isinstance(axis , str ):
raise TypeError('''Axis must be a str''' )
input_variables = locals()
del input_variables["axis"]
if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
msg =(
'''Input values except axis must either be float or int: '''
F'''{list(input_variables.values() )}'''
)
raise TypeError(msg )
# Note: this is the algorithm's own angle convention ((angle % 360) / 450 * 180 / pi),
# not a plain degrees-to-radians conversion.
angle =(angle % 360) / 450 * 180 / math.pi
if axis == "z":
new_x = x * math.cos(angle ) - y * math.sin(angle )
new_y = y * math.cos(angle ) + x * math.sin(angle )
new_z = z
elif axis == "x":
new_y = y * math.cos(angle ) - z * math.sin(angle )
new_z = z * math.cos(angle ) + y * math.sin(angle )
new_x = x
elif axis == "y":
new_x = x * math.cos(angle ) - z * math.sin(angle )
new_z = z * math.cos(angle ) + x * math.sin(angle )
new_y = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 75 | """simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , predictions , references , return_pvalue=False ):
if return_pvalue:
results = pearsonr(references , predictions )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 44 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
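# Modeling classes are only registered when torch is available (standard lazy-import pattern).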
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE["""modeling_swiftformer"""] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 364 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
'''simple docstring'''
# Mask token behaves like a normal word, i.e. includes the space before it.
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
_additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
self.lang_code_to_id = {
lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
self._src_lang = src_lang if src_lang is not None else '''en_XX'''
self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , new_src_lang ):
'''simple docstring'''
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , token_ids_a , token_ids_b = None ):
'''simple docstring'''
if token_ids_b is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , token_ids_a , token_ids_b = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
self.src_lang = src_lang
inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
inputs['''forced_bos_token_id'''] = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
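# The two helpers below reset the fast tokenizer's post-processor whenever the active language changes.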
def SCREAMING_SNAKE_CASE (self , src_lang ):
'''Reset special tokens to the source-lang setting: no prefix, suffix=[eos, src_lang_code].'''
self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , lang ):
'''Reset special tokens to the target-lang setting: no prefix, suffix=[eos, tgt_lang_code].'''
self.cur_lang_code = self.convert_tokens_to_ids(lang )
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , save_directory , filename_prefix = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
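# "Optimum polynomial" puzzle: fit a polynomial to each prefix of a sequence and sum the first incorrect terms (FITs).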
Matrix = list[list[float | int]]
def solve ( matrix :Matrix , vector :Matrix ) -> Matrix:
'''Solve matrix @ x = vector via Gaussian elimination with partial pivoting; returns x as a column matrix.'''
size = len(matrix )
augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
row: int
col: int
rowa: int
cola: int
pivot_row: int
ratio: float
# build the augmented matrix [matrix | vector]
for row in range(size ):
for col in range(size ):
augmented[row][col] = matrix[row][col]
augmented[row][size] = vector[row][0]
row = 0
col = 0
while row < size and col < size:
# pivoting
pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , size ):
ratio = augmented[rowa][col] / augmented[row][col]
augmented[rowa][col] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , size ):
for row in range(col ):
ratio = augmented[row][col] / augmented[col][col]
for cola in range(col , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 1_0 )] for row in range(size )
]
def interpolate ( y_points :list[int] ) -> Callable[[int], int]:
'''Fit a degree-(n-1) polynomial through (1, y_points[0]), ..., (n, y_points[n-1]) and return it as a function.'''
size = len(y_points )
matrix = [[0 for _ in range(size )] for _ in range(size )]
vector = [[0] for _ in range(size )]
coeffs: Matrix
x_val: int
y_val: int
col: int
for x_val, y_val in enumerate(y_points ):
for col in range(size ):
matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
vector[x_val][0] = y_val
coeffs = solve(matrix , vector )
def interpolated_func(var :int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(size ) )
return interpolated_func
def question_function ( variable :int ) -> int:
'''The alternating generating polynomial u(n) = 1 - n + n^2 - n^3 + ... + n^10.'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def solution ( func :Callable[[int], int] = question_function , order :int = 1_0 ) -> int:
'''Sum the first incorrect terms (FITs) of the optimum polynomials fitted to each prefix of the sequence.'''
data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
polynomials = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
ret = 0
poly: Callable[[int], int]
x_val: int
for poly in polynomials:
x_val = 1
while func(x_val ) == poly(x_val ):
x_val += 1
ret += poly(x_val )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 197 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A['''modeling_luke'''] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A, module_spec=__spec__)
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__lowercase = getLogger(__name__)
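# Multi-GPU eval: each rank generates for its data shard and writes rank_{i}_output.json; rank 0 gathers, scores, and saves metrics.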
def eval_data_dir ( data_dir , save_dir , model_name , bs = 8 , max_source_length = 1_024 , type_path="val" , n_obs=None , fpaa=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs = None , prefix="" , **generate_kwargs , )-> Dict:
'''Run evaluation on one rank's shard of the data and save predictions to {save_dir}/rank_{rank}_output.json.'''
model_name = str(model_name )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
save_dir = Path(save_dir )
save_path = save_dir.joinpath(F'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(local_rank )
model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
if fpaa:
model = model.half()
# determine if we need to increase num_beams
use_task_specific_params(model , task ) # update config with task specific params
num_beams = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
num_beams = num_return_sequences
tokenizer = AutoTokenizer.from_pretrained(model_name )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
max_source_length = tokenizer.model_max_length
if prefix is None:
prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
ds = SeqaSeqDataset(
tokenizer , data_dir , max_source_length , max_target_length=1_024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
results = []
for batch in tqdm(data_loader ):
summaries = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
ids = batch["ids"]
if num_return_sequences > 1:
preds = chunks(preds , num_return_sequences ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(preds ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(results , save_path )
return results, sampler.num_replicas
def run_generate( )-> Dict:
'''simple docstring'''
parser = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=A_ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=A_ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=A_ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=A_ , default=A_ )
parser.add_argument(
"--type_path" , type=A_ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=A_ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=A_ , default=8 , required=A_ , help="batch size" )
parser.add_argument(
"--local_rank" , type=A_ , default=-1 , required=A_ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=A_ , default=A_ , required=A_ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=A_ , default=1 , required=A_ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=A_ , default=600 , required=A_ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=A_ , default=A_ , required=A_ )
parser.add_argument("--tgt_lang" , type=A_ , default=A_ , required=A_ )
parser.add_argument(
"--prefix" , type=A_ , required=A_ , default=A_ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
start_time = time.time()
args , rest = parser.parse_known_args()
generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
if generate_kwargs and args.local_rank <= 0:
print(F'''parsed the following generate kwargs: {generate_kwargs}''' )
json_save_dir = Path(args.save_dir + "_tmp" )
Path(json_save_dir ).mkdir(exist_ok=True ) # this handles locking.
intermediate_files = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
dataset_kwargs = {}
if args.src_lang is not None:
dataset_kwargs["src_lang"] = args.src_lang
if args.tgt_lang is not None:
dataset_kwargs["tgt_lang"] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=True )
results , num_replicas = eval_data_dir(
args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
if args.local_rank <= 0:
save_dir = Path(args.save_dir )
save_dir.mkdir(exist_ok=True )
results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
preds = combine_partial_results(results )
if args.num_return_sequences > 1:
save_path = save_dir.joinpath("pseudolabel_results.json" )
print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(preds , save_path )
return
tgt_file = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(tgt_file ) as f:
labels = [x.rstrip() for x in f.readlines()][: len(preds )]
# Calculate metrics, save metrics, and save _generations.txt
calc_bleu = "translation" in args.task
score_fn = calculate_bleu if calc_bleu else calculate_rouge
metric_name = "bleu" if calc_bleu else "rouge"
metrics = score_fn(preds , labels )
metrics["n_obs"] = len(preds )
runtime = time.time() - start_time
metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"] , 4 )
metrics["n_gpus"] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
metrics_save_path = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' )
save_json(metrics , metrics_save_path , indent=None )
print(metrics )
write_txt_file(preds , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(labels , save_dir.joinpath(F'''{args.type_path}.target''' ) )
else:
shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results )-> List:
'''Concatenate partial results from each rank, sort by example id, and return the predictions.'''
records = []
for partial_result in partial_results:
records.extend(partial_result )
records = sorted(records , key=lambda x : x["id"] )
preds = [x["pred"] for x in records]
return preds
def gather_results_from_each_node( num_replicas , save_dir , timeout )-> List[Dict[str, List]]:
'''Wait (up to `timeout` seconds) for every rank to write its json shard, then load them all.'''
start_wait = time.time()
logger.info("waiting for all nodes to finish" )
json_data = None
while (time.time() - start_wait) < timeout:
json_files = list(save_dir.glob("rank_*.json" ) )
if len(json_files ) < num_replicas:
continue
try:
# make sure all json files are fully saved
json_data = lmap(load_json , json_files )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 226 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def lowercase ( A_ )-> Optional[Any]:
'''simple docstring'''
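# Backbone depths/widths per ConvNeXt variant; the auxiliary head reads from the third stage,
# so its input channels default to 384 (tiny/small) and are overridden for larger variants below.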
a : Optional[Any] = 384
if "tiny" in model_name:
a : List[str] = [3, 3, 9, 3]
a : Optional[Any] = [96, 192, 384, 768]
if "small" in model_name:
a : Tuple = [3, 3, 27, 3]
a : str = [96, 192, 384, 768]
if "base" in model_name:
a : Union[str, Any] = [3, 3, 27, 3]
a : Dict = [128, 256, 512, 1_024]
a : Any = 512
if "large" in model_name:
a : Optional[Any] = [3, 3, 27, 3]
a : str = [192, 384, 768, 1_536]
a : Dict = 768
if "xlarge" in model_name:
a : str = [3, 3, 27, 3]
a : List[Any] = [256, 512, 1_024, 2_048]
a : List[Any] = 1_024
# set label information
a : int = 150
a : str = "huggingface/label-files"
a : Tuple = "ade20k-id2label.json"
a : Dict = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
a : int = {int(k ): v for k, v in idalabel.items()}
a : List[Any] = {v: k for k, v in idalabel.items()}
a : Optional[int] = ConvNextConfig(
depths=A_ , hidden_sizes=A_ , out_features=["stage1", "stage2", "stage3", "stage4"] )
a : Tuple = UperNetConfig(
backbone_config=A_ , auxiliary_in_channels=A_ , num_labels=A_ , id2label=A_ , label2id=A_ , )
return config
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
a : int = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def lowercase ( A_ , A_ , A_ )-> str:
'''simple docstring'''
a : str = dct.pop(A_ )
a : str = val
def lowercase ( A_ , A_ , A_ )-> int:
'''simple docstring'''
a : Any = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
a : Tuple = model_name_to_url[model_name]
a : int = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )["state_dict"]
a : Optional[Any] = get_upernet_config(A_ )
a : int = UperNetForSemanticSegmentation(A_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
a : Tuple = state_dict.pop(A_ )
if "bn" in key:
a : Optional[int] = key.replace("bn" , "batch_norm" )
a : Any = val
# rename keys
a : Any = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_ , A_ , A_ )
model.load_state_dict(A_ )
# verify on image
a : Dict = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
a : Any = Image.open(requests.get(A_ , stream=A_ ).raw ).convert("RGB" )
a : Union[str, Any] = SegformerImageProcessor()
a : Dict = processor(A_ , return_tensors="pt" ).pixel_values
with torch.no_grad():
a : Any = model(A_ )
if model_name == "upernet-convnext-tiny":
a : List[str] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
a : int = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
a : Union[str, Any] = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
a : Union[str, Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
a : str = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A_ )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 226 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =state_dict.pop(A__ )
SCREAMING_SNAKE_CASE =val
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE =key.replace('backbone.0.body', 'backbone.conv_encoder.model' )
SCREAMING_SNAKE_CASE =value
else:
SCREAMING_SNAKE_CASE =value
return new_state_dict
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE =state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
SCREAMING_SNAKE_CASE =state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE =in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE =in_proj_bias[:256]
SCREAMING_SNAKE_CASE =in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE =in_proj_bias[256:512]
SCREAMING_SNAKE_CASE =in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE =in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE =state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
SCREAMING_SNAKE_CASE =state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE =in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE =in_proj_bias[:256]
SCREAMING_SNAKE_CASE =in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE =in_proj_bias[256:512]
SCREAMING_SNAKE_CASE =in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE =in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE =state_dict.pop(
F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
SCREAMING_SNAKE_CASE =state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE =in_proj_weight_cross_attn[:256, :]
SCREAMING_SNAKE_CASE =in_proj_bias_cross_attn[:256]
SCREAMING_SNAKE_CASE =in_proj_weight_cross_attn[256:512, :]
SCREAMING_SNAKE_CASE =in_proj_bias_cross_attn[256:512]
SCREAMING_SNAKE_CASE =in_proj_weight_cross_attn[-256:, :]
SCREAMING_SNAKE_CASE =in_proj_bias_cross_attn[-256:]
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =image.size
SCREAMING_SNAKE_CASE =max(A__, A__ )
SCREAMING_SNAKE_CASE =800 if 'detection' in checkpoint_url else 1000
SCREAMING_SNAKE_CASE =target_max_size / current_max_size
SCREAMING_SNAKE_CASE =image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =F.to_tensor(A__ )
SCREAMING_SNAKE_CASE =F.normalize(A__, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(A__, map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(A__, A__, A__ )
SCREAMING_SNAKE_CASE =rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE ='model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
SCREAMING_SNAKE_CASE =state_dict.pop(A__ )
SCREAMING_SNAKE_CASE =val
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE =TableTransformerConfig(
backbone='resnet18', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE =15
SCREAMING_SNAKE_CASE =2
SCREAMING_SNAKE_CASE ={0: 'table', 1: 'table rotated'}
SCREAMING_SNAKE_CASE =idalabel
SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
else:
SCREAMING_SNAKE_CASE =125
SCREAMING_SNAKE_CASE =6
SCREAMING_SNAKE_CASE ={
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
SCREAMING_SNAKE_CASE =idalabel
SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE =DetrImageProcessor(
format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1000 )
SCREAMING_SNAKE_CASE =TableTransformerForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
# verify our conversion
SCREAMING_SNAKE_CASE ='example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
SCREAMING_SNAKE_CASE =hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=A__ )
SCREAMING_SNAKE_CASE =Image.open(A__ ).convert('RGB' )
SCREAMING_SNAKE_CASE =normalize(resize(A__, A__ ) ).unsqueeze(0 )
SCREAMING_SNAKE_CASE =model(A__ )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE =(1, 15, 3)
SCREAMING_SNAKE_CASE =torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
SCREAMING_SNAKE_CASE =torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
SCREAMING_SNAKE_CASE =(1, 125, 7)
SCREAMING_SNAKE_CASE =torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
SCREAMING_SNAKE_CASE =torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3], A__, atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], A__, atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
SCREAMING_SNAKE_CASE =(
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(A__ )
image_processor.push_to_hub(A__ )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase =parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 334 | import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase__ : Union[str, Any] = '''http://www.mocksite.com/file1.txt'''
lowerCAmelCase__ : Optional[Any] = '''"text": ["foo", "foo"]'''
lowerCAmelCase__ : List[str] = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class __snake_case :
__lowerCamelCase = 200
__lowerCamelCase = {"""Content-Length""": """100"""}
__lowerCamelCase = {}
def __a ( self , **__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return [bytes(__UpperCamelCase , 'utf-8' )]
def UpperCamelCase__ ( *A__ , **A__ ) -> Optional[Any]:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any:
import requests
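# Patch requests.request with the mock above so the download manager never touches the network.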
monkeypatch.setattr(A__ , 'request' , A__ )
snake_case__ : Any = URL
if issubclass(A__ , A__ ):
snake_case__ : Optional[Any] = url
elif issubclass(A__ , A__ ):
snake_case__ : Dict = [url]
elif issubclass(A__ , A__ ):
snake_case__ : Any = {'train': url}
snake_case__ : Union[str, Any] = 'dummy'
snake_case__ : List[str] = 'downloads'
snake_case__ : int = tmp_path
snake_case__ : Tuple = DownloadConfig(
cache_dir=os.path.join(A__ , A__ ) , use_etag=A__ , )
snake_case__ : Any = DownloadManager(dataset_name=A__ , download_config=A__ )
snake_case__ : Any = dl_manager.download(A__ )
snake_case__ : Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A__ , A__ ):
snake_case__ : int = [downloaded_paths]
snake_case__ : Any = [urls]
elif isinstance(A__ , A__ ):
assert "train" in downloaded_paths.keys()
snake_case__ : Union[str, Any] = downloaded_paths.values()
snake_case__ : Any = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A__ , A__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
snake_case__ : int = Path(A__ )
snake_case__ : Optional[int] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
snake_case__ : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
snake_case__ : int = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
snake_case__ : int = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Any:
snake_case__ : Tuple = str(A__ )
if issubclass(A__ , A__ ):
snake_case__ : Dict = filename
elif issubclass(A__ , A__ ):
snake_case__ : Any = [filename]
elif issubclass(A__ , A__ ):
snake_case__ : Dict = {'train': filename}
snake_case__ : Union[str, Any] = 'dummy'
snake_case__ : List[Any] = xz_file.parent
snake_case__ : Dict = 'extracted'
snake_case__ : List[Any] = DownloadConfig(
cache_dir=A__ , use_etag=A__ , )
snake_case__ : Optional[int] = DownloadManager(dataset_name=A__ , download_config=A__ )
snake_case__ : Optional[Any] = dl_manager.extract(A__ )
snake_case__ : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(A__ , A__ ):
snake_case__ : str = [extracted_paths]
snake_case__ : Dict = [paths]
elif isinstance(A__ , A__ ):
assert "train" in extracted_paths.keys()
snake_case__ : Any = extracted_paths.values()
snake_case__ : Dict = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A__ , A__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
snake_case__ : Optional[int] = Path(A__ )
snake_case__ : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A__ , etag=A__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
snake_case__ : Dict = extracted_path.read_text()
snake_case__ : Union[str, Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCamelCase__ ( A__ , A__ ) -> Union[str, Any]:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(A__ , start=1 ):
snake_case__ : Optional[int] = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def UpperCamelCase__ ( A__ , A__ ) -> Optional[Any]:
snake_case__ : Tuple = request.getfixturevalue(A__ )
snake_case__ : Optional[int] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def UpperCamelCase__ ( A__ , A__ ) -> int:
snake_case__ : List[Any] = request.getfixturevalue(A__ )
snake_case__ : str = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_tar == 1
assert num_jsonl == 2
def UpperCamelCase__ ( A__ ) -> Union[str, Any]:
snake_case__ : Dict = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(A__ ) , start=1 ):
assert os.path.basename(A__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 143 | 0 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowerCamelCase__ = parse(importlib.metadata.version("""torch"""))
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
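# Resolve the operation name (e.g. ">=") to a comparison callable, then compare the installed
# package's parsed version against the requirement.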
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
__lowerCAmelCase : Union[str, Any] = STR_OPERATION_TO_FUNC[operation]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = parse(importlib.metadata.version(_UpperCamelCase ) )
return operation(_UpperCamelCase , parse(_UpperCamelCase ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
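# Convenience wrapper: compare the installed torch version against a requirement string.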
return compare_versions(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) | 352 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[str] = str(_UpperCamelCase )
return len(_UpperCamelCase ) == 9 and set(_UpperCamelCase ) == set('123456789' )
def __lowerCAmelCase ():
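# The answer is a concatenated product: n * 100002 joins a 4-digit n with 2n (4 + 5 digits),
# and n * 1002003 joins a 3-digit n with 2n and 3n (3 + 3 + 3 digits).
# Searching 4-digit bases downward first finds the largest candidate.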
for base_num in range(9999 , 4999 , -1 ):
__lowerCAmelCase : Union[str, Any] = 10_0002 * base_num
if is_9_pandigital(_UpperCamelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
__lowerCAmelCase : Dict = 100_2003 * base_num
if is_9_pandigital(_UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'{solution() = }') | 182 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=lowercase__ , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=lowercase__ , default=5 )
parser.add_argument('''--batch_size''' , type=lowercase__ , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=lowercase__ , default=1 )
parser.add_argument('''--freeze''' , type=lowercase__ , default=lowercase__ )
parser.add_argument('''--learning_rate''' , type=lowercase__ , default=5e-4 )
parser.add_argument('''--seed''' , type=lowercase__ , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=lowercase__ , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=lowercase__ , default=10 )
parser.add_argument('''--weight_decay''' , type=lowercase__ , default=0.01 )
parser.add_argument('''--output_dir''' , type=lowercase__ , default='''./results''' )
return parser.parse_args()
__lowerCAmelCase : List[Any] =load('accuracy')
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = eval_pred
__SCREAMING_SNAKE_CASE : int = np.argmax(lowercase__ , axis=1 )
return metric.compute(predictions=lowercase__ , references=lowercase__ )
class _lowercase ( A__ ):
'''simple docstring'''
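# Trainer callback that re-runs evaluation on the training split whenever the Trainer evaluates,
# logging those metrics under the "train" prefix.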
def __init__( self :Any , lowerCAmelCase__ :Union[str, Any] ) -> None:
super().__init__()
__SCREAMING_SNAKE_CASE : Dict = trainer
def __magic_name__( self :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :int ) -> Optional[int]:
if control.should_evaluate:
__SCREAMING_SNAKE_CASE : List[str] = deepcopy(lowerCAmelCase__ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : List[Any] = get_args()
set_seed(args.seed )
__SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = dataset.train_test_split(test_size=0.2 )
__SCREAMING_SNAKE_CASE : int = train_test['''test'''].train_test_split(test_size=0.5 )
__SCREAMING_SNAKE_CASE : Optional[int] = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
__SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__SCREAMING_SNAKE_CASE : str = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : Any = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(lowercase__ ):
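# Tokenize the source code and convert the complexity class name to its integer label.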
__SCREAMING_SNAKE_CASE : Any = tokenizer(example['''src'''] , truncation=lowercase__ , max_length=1024 )
__SCREAMING_SNAKE_CASE : List[str] = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = train_test_validation.map(
lowercase__ , batched=lowercase__ , remove_columns=train_test_validation['''train'''].column_names , )
__SCREAMING_SNAKE_CASE : Dict = DataCollatorWithPadding(tokenizer=lowercase__ )
__SCREAMING_SNAKE_CASE : Any = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
__SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=lowercase__ , data_collator=lowercase__ , compute_metrics=lowercase__ , )
print('''Training...''' )
trainer.add_callback(CustomCallback(lowercase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 9 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
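# Equivalent resistance of resistors in parallel: R_eq = 1 / (1/R_1 + ... + 1/R_n).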
__SCREAMING_SNAKE_CASE : Dict = 0.00
__SCREAMING_SNAKE_CASE : List[str] = 0
for resistor in resistors:
if resistor <= 0:
__SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def _UpperCamelCase ( lowercase__ ):
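# Equivalent resistance of resistors in series: R_eq = R_1 + ... + R_n.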
__SCREAMING_SNAKE_CASE : Tuple = 0.00
__SCREAMING_SNAKE_CASE : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 1 |
def UpperCamelCase__( UpperCamelCase__ : int = 1_00 )->int:
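# Closed forms: (1 + ... + n)^2 = (n(n+1)/2)^2 and 1^2 + ... + n^2 = n(n+1)(2n+1)/6.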
A__ = (n * (n + 1) // 2) ** 2
A__ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 367 |
def UpperCamelCase__( UpperCamelCase__ : str )->str:
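# Keep only the first occurrence of each alphabetic character in the key; spaces are preserved.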
A__ = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def UpperCamelCase__( UpperCamelCase__ : str )->dict[str, str]:
A__ = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
A__ = remove_duplicates(key.upper() )
A__ = len(UpperCamelCase__ )
# First fill cipher with key characters
A__ = {alphabet[i]: char for i, char in enumerate(UpperCamelCase__ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(UpperCamelCase__ ) , 26 ):
A__ = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
A__ = alphabet[i - offset]
A__ = char
return cipher_alphabet
def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : dict[str, str] )->str:
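# Substitute each uppercased character through the cipher map; unmapped characters pass through unchanged.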
return "".join(cipher_map.get(UpperCamelCase__ , UpperCamelCase__ ) for ch in message.upper() )
def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : dict[str, str] )->str:
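# Build the inverse mapping and decode by the same substitution.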
A__ = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(UpperCamelCase__ , UpperCamelCase__ ) for ch in message.upper() )
def UpperCamelCase__( )->None:
A__ = input('''Enter message to encode or decode: ''' ).strip()
A__ = input('''Enter keyword: ''' ).strip()
A__ = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
A__ = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
A__ = create_cipher_map(UpperCamelCase__ )
print(func(UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 39 | 0 |